remove some Debian patches that conflict with the Raspberry Pi downstream changes
Author:     Raspbian kernel package updater <root@raspbian.org>
AuthorDate: Sun, 23 Jul 2017 01:53:10 +0000
Commit:     Raspbian kernel package updater <root@raspbian.org>
CommitDate: Sun, 23 Jul 2017 01:53:10 +0000
25 files changed:
Documentation/kernel-parameters.txt
arch/arc/mm/mmap.c
arch/arm/mm/mmap.c
arch/frv/mm/elf-fdpic.c
arch/mips/mm/mmap.c
arch/parisc/kernel/sys_parisc.c
arch/powerpc/mm/hugetlbpage-radix.c
arch/powerpc/mm/mmap.c
arch/powerpc/mm/slice.c
arch/s390/mm/mmap.c
arch/sh/mm/mmap.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/mm/hugetlbpage.c
arch/tile/mm/hugetlbpage.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/hugetlbpage.c
arch/xtensa/kernel/syscall.c
debian/patches/series
drivers/pinctrl/intel/pinctrl-cherryview.c
fs/hugetlbfs/inode.c
fs/proc/task_mmu.c
include/linux/mm.h
mm/gup.c
mm/memory.c
mm/mmap.c

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index cbe09c71fcebe78676e977e454941def73facd2a..be36da42fe0656810d0a327d7fff8a8b283f5d70 100644
@@ -3932,13 +3932,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        spia_pedr=
        spia_peddr=
 
-       stack_guard_gap=        [MM]
-                       override the default stack gap protection. The value
-                       is in page units and it defines how many pages prior
-                       to (for stacks growing down) resp. after (for stacks
-                       growing up) the main stack are reserved for no other
-                       mapping. Default value is 256 pages.
-
        stacktrace      [FTRACE]
                        Enabled the stack tracer on boot up.
 
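
For context: stack_guard_gap, whose documentation is dropped above, took a page count on the kernel command line; with 4 KiB pages its default of 256 pages is a 1 MiB reservation. A hypothetical boot line shrinking it to 128 pages (512 KiB) would have looked like the following — the kernel image path and root device are placeholders, not taken from this commit:

    linux /boot/vmlinuz root=/dev/mmcblk0p2 stack_guard_gap=128
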
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index cf4ae6958240074d265b4f26cb6f0d4271105b92..2e06d56e987bf84c773b01bbfa6a7b4af1ac6fc9 100644
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 641334ebf46dcd09832d83843b8521989af3b26f..66353caa35b9f78fa2aa4754dea3ce813593303f 100644
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index efa59f1f80226e6c951182ad0124aeccd38c7787..836f14707a627f156343154f359ac1f7758e9fd4 100644
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        goto success;
        }
 
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index a44052c05f93efe23db370205b5ee58606128e23..d08ea3ff0f53345e7501dd168f32c2177976f6ee 100644
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 1d7691fa8ab25a4df935e4df5a6841697a3cbae0..0a393a04e89182cba498fa64774dd32177860eb7 100644
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;
@@ -115,10 +115,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                else
                        addr = PAGE_ALIGN(addr);
 
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)) &&
-                   (!prev || addr >= vm_end_gap(prev)))
+                   (!vma || addr + len <= vma->vm_start))
                        goto found_addr;
        }
 
@@ -142,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_color_align, last_mmap;
@@ -176,11 +175,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
-
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)) &&
-                   (!prev || addr >= vm_end_gap(prev)))
+                   (!vma || addr + len <= vma->vm_start))
                        goto found_addr;
        }
 
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index a2b2d97f7edacf7ed8bfbc591323d25572f41bb4..35254a6784561b6f5f70399822cb163f8e59b14d 100644
@@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        /*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 5bc2845cddf416711ad2e038bcf469cf2491d0a1..2f1e44362198d3f16d85fdd4656d37e618b51824 100644
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index c4d5c9c61e0fc16847854015931be0807c80344e..2b27458902ee888d1ba3480191497c105a184e94 100644
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vm_start_gap(vma));
+       return (!vma || (addr + len) <= vma->vm_start);
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 812368f274c96bacdf2cbba41aefabe2f44ef5a8..eb9df2822da193250fc2c72589a31e620110a599 100644
@@ -98,7 +98,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -136,7 +136,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 7df7d59441889aa8bc4a748cd4a40dc94ab89cff..6777177807c26f5d6630ec48535bb4f7d5eba0e2 100644
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 02e05e221b94516387c58fdc1ec572c6ccef036f..fe8b8ee8e6602307bebf6813704146da7d9bc30e 100644
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 58cde8d9be8a25c3be709811b27df4dd4f7b45b2..988acc8b1b80a387d9119782f53f1d41dbe53c4e 100644
@@ -116,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 67508b249ede3dd4d3810fe7a997655f6e3f0db4..77ceaa343fcef10956b73222f7033031035c3897 100644
@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 1119414ab419a6ec67f310d03b12502f24bc1503..a55ed63b9f91b0d45dbb476a22af9a19c4ab5fc8 100644
@@ -140,7 +140,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index fe342e8ed5299c40a1270c1f9a020d0b69289811..2ae8584b44c73d7c93b30b80ca643109f6e5bcf5 100644
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 3aaaae18417c4e4b6ab758709d996f9dfeedef46..83cf49685373867080689b163bc5fcf3ed89f3c7 100644
@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
-               if (!vmm || addr + len <= vm_start_gap(vmm))
+               if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
diff --git a/debian/patches/series b/debian/patches/series
index 8acf87f4fc4b249e08c5fdcd6b665eb380a36243..e38bde46260e8a1c2da4115e8cabda2e7c2e8edd 100644
@@ -46,7 +46,6 @@ debian/btrfs-warn-about-raid5-6-being-experimental-at-mount.patch
 debian/amd64-don-t-warn-about-expected-w+x-pages-on-xen.patch
 # Arch bug fixes
 bugfix/arm/arm-dts-kirkwood-fix-sata-pinmux-ing-for-ts419.patch
-bugfix/x86/pinctrl-cherryview-add-a-quirk-to-make-acer-chromebo.patch
 # Arch features
 features/mips/MIPS-increase-MAX-PHYSMEM-BITS-on-Loongson-3-only.patch
 features/mips/MIPS-Loongson-3-Add-Loongson-LS3A-RS780E-1-way-machi.patch
@@ -108,7 +107,6 @@ bugfix/all/sctp-do-not-inherit-ipv6_-mc-ac-fl-_list-from-parent.patch
 bugfix/all/ipv6-dccp-do-not-inherit-ipv6_mc_list-from-parent.patch
 bugfix/all/crypto-skcipher-Add-missing-api-setkey-checks.patch
 bugfix/all/ipv6-fix-out-of-bound-writes-in-__ip6_append_data.patch
-bugfix/all/mm-larger-stack-guard-gap-between-vmas.patch
 bugfix/all/mm-fix-new-crash-in-unmapped_area_topdown.patch
 # Fix exported symbol versions
 bugfix/ia64/revert-ia64-move-exports-to-definitions.patch
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 007770facdebb0344e377ba5ef234223293ea152..c43b1e9a06aff0ba3fa94f49967ca4259ed77ced 100644
@@ -13,7 +13,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1525,31 +1524,10 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-/*
- * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
- * tables. Since we leave GPIOs that are not capable of generating
- * interrupts out of the irqdomain the numbering will be different and
- * cause devices using the hardcoded IRQ numbers fail. In order not to
- * break such machines we will only mask pins from irqdomain if the machine
- * is not listed below.
- */
-static const struct dmi_system_id chv_no_valid_mask[] = {
-       {
-               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-               .ident = "Acer Chromebook (CYAN)",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
-               },
-       }
-};
-
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
        const struct chv_gpio_pinrange *range;
        struct gpio_chip *chip = &pctrl->chip;
-       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
 
        *chip = chv_gpio_chip;
@@ -1558,7 +1536,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        chip->label = dev_name(pctrl->dev);
        chip->parent = pctrl->dev;
        chip->base = -1;
-       chip->irq_need_valid_mask = need_valid_mask;
+       chip->irq_need_valid_mask = true;
 
        ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
@@ -1589,7 +1567,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-               if (need_valid_mask && intsel >= pctrl->community->nirqs)
+               if (intsel >= pctrl->community->nirqs)
                        clear_bit(i, chip->irq_valid_mask);
        }
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 704fa0b17309286f4581d3273d87560b3b114785..4fb7b10f3a05e0f3c0c0cbe11d3355c82b2a6e06 100644
@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 77a4ec2a0ebdce1e0e21ee9d76c2b8241e6e2e75..f539b33a79304806552a99333ba2fc34a3239cc2 100644
@@ -302,7 +302,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
+       if (stack_guard_page_start(vma, start))
+               start += PAGE_SIZE;
        end = vma->vm_end;
+       if (stack_guard_page_end(vma, end))
+               end -= PAGE_SIZE;
 
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bea7c55250d6b5d77f42f9a5ea67efbf0c555657..77b1438f5b3b6f81f6e87971015684fac32d4029 100644
@@ -1378,11 +1378,39 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
        return !vma->vm_ops;
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+                                            unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+                                          unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSUP) &&
+               (vma->vm_end == addr) &&
+               !vma_growsup(vma->vm_next, addr);
+}
+
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2121,7 +2149,6 @@ void page_cache_async_readahead(struct address_space *mapping,
                                pgoff_t offset,
                                unsigned long size);
 
-extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2150,30 +2177,6 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
        return vma;
 }
 
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
-{
-       unsigned long vm_start = vma->vm_start;
-
-       if (vma->vm_flags & VM_GROWSDOWN) {
-               vm_start -= stack_guard_gap;
-               if (vm_start > vma->vm_start)
-                       vm_start = 0;
-       }
-       return vm_start;
-}
-
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
-{
-       unsigned long vm_end = vma->vm_end;
-
-       if (vma->vm_flags & VM_GROWSUP) {
-               vm_end += stack_guard_gap;
-               if (vm_end < vma->vm_end)
-                       vm_end = -PAGE_SIZE;
-       }
-       return vm_end;
-}
-
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
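
For context, the include/linux/mm.h hunks above trade the configurable multi-page gap for the older one-page scheme: address-space searches compare candidate ranges against the raw VMA bounds again, and the single guard page is enforced later, at fault time, by stack_guard_page_start()/stack_guard_page_end(). A minimal standalone sketch of the search-time difference, with simplified types, 32-bit-safe made-up addresses, a hard-coded 4 KiB page size, and no underflow clamping (illustration only, not kernel code):

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x0100UL
#define PAGE_SHIFT   12

/* Removed scheme: a growsdown VMA repels mappings by stack_guard_gap. */
static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;   /* 1 MiB */

static int fits_with_gap(const struct vma *v, unsigned long addr,
                         unsigned long len)
{
        unsigned long start = v->vm_start;      /* vm_start_gap(), simplified */
        if (v->vm_flags & VM_GROWSDOWN)
                start -= stack_guard_gap;
        return addr + len <= start;
}

/* Restored scheme: only the VMA itself blocks the range; the single
 * guard page is skipped at fault time instead. */
static int fits_plain(const struct vma *v, unsigned long addr,
                      unsigned long len)
{
        return addr + len <= v->vm_start;
}

int main(void)
{
        struct vma stack = { 0xb6f00000UL, 0xb6f21000UL, VM_GROWSDOWN };
        unsigned long addr = 0xb6e80000UL;      /* 512 KiB below the stack */
        unsigned long len = 0x10000;            /* 64 KiB request */

        printf("with gap: %d, plain: %d\n",
               fits_with_gap(&stack, addr, len),
               fits_plain(&stack, addr, len));
        return 0;       /* prints "with gap: 0, plain: 1" */
}
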
diff --git a/mm/gup.c b/mm/gup.c
index c63a0341ae38238820a0a972782331b468704a42..ec4f82704b6f368bf4e128d3feb7356a8c482022 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -370,6 +370,11 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        /* mlock all present pages, but do not fault in new pages */
        if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
                return -ENOENT;
+       /* For mm_populate(), just skip the stack guard page. */
+       if ((*flags & FOLL_POPULATE) &&
+                       (stack_guard_page_start(vma, address) ||
+                        stack_guard_page_end(vma, address + PAGE_SIZE)))
+               return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
diff --git a/mm/memory.c b/mm/memory.c
index 25facf71e40087b1c0360638077e98912d93f272..150737f0a1dc228023180604f29d8e058b157b86 100644
@@ -2698,6 +2698,40 @@ out_release:
        return ret;
 }
 
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+               struct vm_area_struct *prev = vma->vm_prev;
+
+               /*
+                * Is there a mapping abutting this one below?
+                *
+                * That's only ok if it's the same stack mapping
+                * that has gotten split..
+                */
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+               return expand_downwards(vma, address - PAGE_SIZE);
+       }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+               struct vm_area_struct *next = vma->vm_next;
+
+               /* As VM_GROWSDOWN but s/below/above/ */
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+               return expand_upwards(vma, address + PAGE_SIZE);
+       }
+       return 0;
+}
+
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2714,6 +2748,10 @@ static int do_anonymous_page(struct fault_env *fe)
        if (vma->vm_flags & VM_SHARED)
                return VM_FAULT_SIGBUS;
 
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, fe->address) < 0)
+               return VM_FAULT_SIGSEGV;
+
        /*
         * Use pte_alloc() instead of pte_alloc_map().  We can't run
         * pte_offset_map() on pmds where a huge pmd might be created
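
To make the restored fault path concrete (made-up addresses): for a VM_GROWSDOWN stack VMA covering [0x7ffd000, 0x8000000), a write fault at 0x7ffd123 is masked to 0x7ffd000, which equals vm_start, so check_stack_guard_page() calls expand_downwards(vma, 0x7ffc000), growing the stack one page and leaving the new lowest page as the guard; had an unrelated mapping ended at 0x7ffd000, the fault would return VM_FAULT_SIGSEGV instead. A hedged user-space illustration of the growth side (an assumed test program, not part of this commit) — each recursion frame consumes at least one page, so each deepening faults on the current lowest stack page and silently extends the VMA:

#include <stdio.h>

/* Each frame takes at least one 4 KiB page, so recursing forces the
 * stack VMA to expand page by page through the path patched above. */
static void grow(int depth)
{
        volatile char pad[4096];
        pad[0] = (char)depth;
        if (depth > 0)
                grow(depth - 1);
        pad[4095] = 0;  /* keep the frame live across the call (defeats TCO) */
}

int main(void)
{
        grow(64);       /* ~64 single-page expansions, well under RLIMIT_STACK */
        puts("stack grew page by page without touching another mapping");
        return 0;
}
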
diff --git a/mm/mmap.c b/mm/mmap.c
index ccbf085caab81361657a1634fd17949968ab2b1e..67c56294a8d47c0020980770711b36974ca54b17 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,7 +183,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        unsigned long retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
 
@@ -229,8 +228,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        }
 
        /* Check against existing mmap mappings. */
-       next = find_vma(mm, oldbrk);
-       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
+       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
 
        /* Ok, looks good - let it rip. */
@@ -253,22 +251,10 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-       unsigned long max, prev_end, subtree_gap;
-
-       /*
-        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
-        * allow two stack_guard_gaps between them here, and when choosing
-        * an unmapped area; whereas when expanding we only require one.
-        * That's a little inconsistent, but keeps the code here simpler.
-        */
-       max = vm_start_gap(vma);
-       if (vma->vm_prev) {
-               prev_end = vm_end_gap(vma->vm_prev);
-               if (max > prev_end)
-                       max -= prev_end;
-               else
-                       max = 0;
-       }
+       unsigned long max, subtree_gap;
+       max = vma->vm_start;
+       if (vma->vm_prev)
+               max -= vma->vm_prev->vm_end;
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -364,7 +350,7 @@ static void validate_mm(struct mm_struct *mm)
                        anon_vma_unlock_read(anon_vma);
                }
 
-               highest_address = vm_end_gap(vma);
+               highest_address = vma->vm_end;
                vma = vma->vm_next;
                i++;
        }
@@ -553,7 +539,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
-               mm->highest_vm_end = vm_end_gap(vma);
+               mm->highest_vm_end = vma->vm_end;
 
        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -868,7 +854,7 @@ again:
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
-                               mm->highest_vm_end = vm_end_gap(vma);
+                               mm->highest_vm_end = end;
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
@@ -953,7 +939,7 @@ again:
                         * mm->highest_vm_end doesn't need any update
                         * in remove_next == 1 case.
                         */
-                       VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
+                       VM_WARN_ON(mm->highest_vm_end != end);
                }
        }
        if (insert && file)
@@ -1797,7 +1783,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit left subtree if it looks promising */
-               gap_end = vm_start_gap(vma);
+               gap_end = vma->vm_start;
                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
@@ -1808,7 +1794,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                        }
                }
 
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
 check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
@@ -1836,8 +1822,8 @@ check_current:
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vm_end_gap(vma->vm_prev);
-                               gap_end = vm_start_gap(vma);
+                               gap_start = vma->vm_prev->vm_end;
+                               gap_end = vma->vm_start;
                                goto check_current;
                        }
                }
@@ -1901,7 +1887,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
@@ -1914,7 +1900,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
                /* Check if current node has a suitable gap */
-               gap_end = vm_start_gap(vma);
+               gap_end = vma->vm_start;
                if (gap_end < low_limit)
                        return -ENOMEM;
                if (gap_start <= high_limit &&
@@ -1941,7 +1927,7 @@ check_current:
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
-                                       vm_end_gap(vma->vm_prev) : 0;
+                                       vma->vm_prev->vm_end : 0;
                                goto check_current;
                        }
                }
@@ -1979,7 +1965,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE - mmap_min_addr)
@@ -1990,10 +1976,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)) &&
-                   (!prev || addr >= vm_end_gap(prev)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -2016,7 +2001,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
@@ -2031,10 +2016,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)) &&
-                               (!prev || addr >= vm_end_gap(prev)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -2169,19 +2153,21 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma,
-                            unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, vma->vm_flags, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -2219,29 +2205,16 @@ static int acct_stack_growth(struct vm_area_struct *vma,
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *next;
-       unsigned long gap_addr;
        int error = 0;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
        /* Guard against wrapping around to address 0. */
-       address &= PAGE_MASK;
-       address += PAGE_SIZE;
-       if (!address)
-               return -ENOMEM;
-
-       /* Enforce stack_guard_gap */
-       gap_addr = address + stack_guard_gap;
-       if (gap_addr < address)
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else
                return -ENOMEM;
-       next = vma->vm_next;
-       if (next && next->vm_start < gap_addr) {
-               if (!(next->vm_flags & VM_GROWSUP))
-                       return -ENOMEM;
-               /* Check that both stack segments have the same anon_vma? */
-       }
 
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
@@ -2286,7 +2259,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                if (vma->vm_next)
                                        vma_gap_update(vma->vm_next);
                                else
-                                       mm->highest_vm_end = vm_end_gap(vma);
+                                       mm->highest_vm_end = address;
                                spin_unlock(&mm->page_table_lock);
 
                                perf_event_mmap(vma);
@@ -2307,8 +2280,6 @@ int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct vm_area_struct *prev;
-       unsigned long gap_addr;
        int error;
 
        address &= PAGE_MASK;
@@ -2316,17 +2287,6 @@ int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
-       /* Enforce stack_guard_gap */
-       gap_addr = address - stack_guard_gap;
-       if (gap_addr > address)
-               return -ENOMEM;
-       prev = vma->vm_prev;
-       if (prev && prev->vm_end > gap_addr) {
-               if (!(prev->vm_flags & VM_GROWSDOWN))
-                       return -ENOMEM;
-               /* Check that both stack segments have the same anon_vma? */
-       }
-
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
@@ -2381,25 +2341,28 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
-static int __init cmdline_parse_stack_guard_gap(char *p)
-{
-       unsigned long val;
-       char *endptr;
-
-       val = simple_strtoul(p, &endptr, 10);
-       if (!*endptr)
-               stack_guard_gap = val << PAGE_SHIFT;
-
-       return 0;
-}
-__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
-
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *next;
+
+       address &= PAGE_MASK;
+       next = vma->vm_next;
+       if (next && next->vm_start == address + PAGE_SIZE) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+       }
        return expand_upwards(vma, address);
 }
 
@@ -2421,6 +2384,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *prev;
+
+       address &= PAGE_MASK;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end == address) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+       }
        return expand_downwards(vma, address);
 }
 
@@ -2518,7 +2489,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma->vm_prev = prev;
                vma_gap_update(vma);
        } else
-               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+               mm->highest_vm_end = prev ? prev->vm_end : 0;
        tail_vma->vm_next = NULL;
 
        /* Kill the cache */
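
Finally, the restored guard checks above (expand_stack() here, check_stack_guard_page() in mm/memory.c) only refuse growth when another non-stack mapping directly abuts the faulting page. A hedged demonstration (an assumed test program; parsing /proc/self/maps for "[stack]" and the 4 KiB page size are assumptions of this sketch): pinning a page two pages below the current stack bottom leaves room for exactly one expansion, after which prev->vm_end equals the faulting address and the next deepening dies with SIGSEGV:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE 4096UL

/* Find the current bottom of the main stack VMA via /proc/self/maps. */
static unsigned long stack_bottom(void)
{
        char line[256];
        unsigned long lo = 0, hi = 0;
        FILE *f = fopen("/proc/self/maps", "r");

        while (f && fgets(line, sizeof(line), f)) {
                if (strstr(line, "[stack]")) {
                        sscanf(line, "%lx-%lx", &lo, &hi);
                        break;
                }
        }
        if (f)
                fclose(f);
        return lo;
}

static void grow(int depth)
{
        volatile char pad[4096];
        pad[0] = (char)depth;
        if (depth > 0)
                grow(depth - 1);
        pad[4095] = 0;  /* keep the frame live across the call (defeats TCO) */
}

int main(void)
{
        unsigned long lo = stack_bottom();

        if (!lo) {
                fputs("could not locate [stack]\n", stderr);
                return 1;
        }
        /* Pin an unrelated page two pages below the stack bottom: the
         * stack may then expand exactly once before prev->vm_end meets
         * the faulting address and the guard check returns -ENOMEM. */
        if (mmap((void *)(lo - 2 * PAGE), PAGE, PROT_READ,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        grow(1024);     /* expected to die with SIGSEGV, not to return */
        puts("unreachable if the guard logic is in effect");
        return 0;
}
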