spia_pedr=
spia_peddr=
- stack_guard_gap= [MM]
- override the default stack gap protection. The value
- is in page units and it defines how many pages prior
- to (for stacks growing down) resp. after (for stacks
- growing up) the main stack are reserved for no other
- mapping. Default value is 256 pages.
-
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
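(For scale, since the value above is in page units: with 4 KiB pages the
default of 256 pages reserved 256 << PAGE_SHIFT = 256 * 4096 bytes = 1 MiB
of address space below a grow-down stack; the same shift appears in the
removed cmdline_parse_stack_guard_gap() further below.)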
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
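Each hunk of this shape, here and throughout the arch code below, reverts the
same one-line change: the mapping bound drops from the gap-padded
vm_start_gap(vma) back to the raw vma->vm_start. One worked case covers them
all (hypothetical addresses, 4 KiB pages, the old default 1 MiB gap):

	/* stack vma: [0x7f0000100000, 0x7f0000200000), VM_GROWSDOWN
	 * request:   addr = 0x7f00000f0000, len = 0x10000
	 * old bound: addr + len <= vm_start_gap(vma) == vm_start - 1 MiB -> rejected
	 * new bound: addr + len <= vma->vm_start                         -> accepted
	 */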
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
goto success;
}
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev;
+ struct vm_area_struct *vma;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
else
addr = PAGE_ALIGN(addr);
- vma = find_vma_prev(mm, addr, &prev);
+ vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
+ (!vma || addr + len <= vma->vm_start))
goto found_addr;
}
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma, *prev;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
-
- vma = find_vma_prev(mm, addr, &prev);
+ vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
+ (!vma || addr + len <= vma->vm_start))
goto found_addr;
}
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
/*
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vm_start_gap(vma));
+ return (!vma || (addr + len) <= vma->vm_start);
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vm_start_gap(vmm))
+ if (!vmm || addr + len <= vmm->vm_start)
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
debian/amd64-don-t-warn-about-expected-w+x-pages-on-xen.patch
# Arch bug fixes
bugfix/arm/arm-dts-kirkwood-fix-sata-pinmux-ing-for-ts419.patch
-bugfix/x86/pinctrl-cherryview-add-a-quirk-to-make-acer-chromebo.patch
# Arch features
features/mips/MIPS-increase-MAX-PHYSMEM-BITS-on-Loongson-3-only.patch
features/mips/MIPS-Loongson-3-Add-Loongson-LS3A-RS780E-1-way-machi.patch
bugfix/all/ipv6-dccp-do-not-inherit-ipv6_mc_list-from-parent.patch
bugfix/all/crypto-skcipher-Add-missing-api-setkey-checks.patch
bugfix/all/ipv6-fix-out-of-bound-writes-in-__ip6_append_data.patch
-bugfix/all/mm-larger-stack-guard-gap-between-vmas.patch
bugfix/all/mm-fix-new-crash-in-unmapped_area_topdown.patch
# Fix exported symbol versions
bugfix/ia64/revert-ia64-move-exports-to-definitions.patch
* published by the Free Software Foundation.
*/
-#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
chained_irq_exit(chip, desc);
}
-/*
- * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
- * tables. Since we leave GPIOs that are not capable of generating
- * interrupts out of the irqdomain the numbering will be different and
- * cause devices using the hardcoded IRQ numbers fail. In order not to
- * break such machines we will only mask pins from irqdomain if the machine
- * is not listed below.
- */
-static const struct dmi_system_id chv_no_valid_mask[] = {
- {
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
- .ident = "Acer Chromebook (CYAN)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
- DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
- },
- }
-};
-
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
struct gpio_chip *chip = &pctrl->chip;
- bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
*chip = chv_gpio_chip;
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = need_valid_mask;
+ chip->irq_need_valid_mask = true;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
- if (need_valid_mask && intsel >= pctrl->community->nirqs)
+ if (intsel >= pctrl->community->nirqs)
clear_bit(i, chip->irq_valid_mask);
}
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
+ if (stack_guard_page_start(vma, start))
+ start += PAGE_SIZE;
end = vma->vm_end;
+ if (stack_guard_page_end(vma, end))
+ end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
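With the two adjustments restored above, a guarded stack end is simply not
listed; e.g. a hypothetical grow-down stack whose predecessor does not abut it:

	/* vma [0x7ffc000, 0x7ffe000), VM_GROWSDOWN, vm_prev->vm_end != 0x7ffc000:
	 * stack_guard_page_start() is true at vm_start, so the range printed
	 * into /proc/<pid>/maps is 07ffd000-07ffe000, one page narrower.
	 */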
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSUP) &&
+ (vma->vm_end == addr) &&
+ !vma_growsup(vma->vm_next, addr);
+}
+
int vma_is_stack_for_current(struct vm_area_struct *vma);
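The four helpers restored above encode a single rule: the first page of a
grow-down vma (the last page of a grow-up one) is a guard page unless the
abutting vma is the other half of the same split stack. A minimal userspace
mock of the grow-down predicate (hypothetical, not kernel code):

	#include <stdio.h>

	#define VM_GROWSDOWN 0x0100

	struct vma { unsigned long start, end, flags; struct vma *prev; };

	/* Mirrors vma_growsdown(): does v end at addr and grow down? */
	static int growsdown(struct vma *v, unsigned long addr)
	{
		return v && v->end == addr && (v->flags & VM_GROWSDOWN);
	}

	/* Mirrors stack_guard_page_start(). */
	static int guard_page_start(struct vma *v, unsigned long addr)
	{
		return (v->flags & VM_GROWSDOWN) && v->start == addr &&
		       !growsdown(v->prev, addr);
	}

	int main(void)
	{
		struct vma lo = { 0x10000, 0x20000, VM_GROWSDOWN, NULL };
		struct vma hi = { 0x20000, 0x30000, VM_GROWSDOWN, &lo };

		printf("%d\n", guard_page_start(&hi, hi.start)); /* 0: split stack */
		printf("%d\n", guard_page_start(&lo, lo.start)); /* 1: guard page */
		return 0;
	}

The split-stack exception is what keeps an mprotect() over part of a stack
from conjuring a guard page in the middle of it.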
extern unsigned long move_page_tables(struct vm_area_struct *vma,
pgoff_t offset,
unsigned long size);
-extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
return vma;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
-{
- unsigned long vm_start = vma->vm_start;
-
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
- return vm_start;
-}
-
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
-{
- unsigned long vm_end = vma->vm_end;
-
- if (vma->vm_flags & VM_GROWSUP) {
- vm_end += stack_guard_gap;
- if (vm_end < vma->vm_end)
- vm_end = -PAGE_SIZE;
- }
- return vm_end;
-}
-
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
+ /* For mm_populate(), just skip the stack guard page. */
+ if ((*flags & FOLL_POPULATE) &&
+ (stack_guard_page_start(vma, address) ||
+ stack_guard_page_end(vma, address + PAGE_SIZE)))
+ return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
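A note on the restored FOLL_POPULATE branch: mm_populate() (the mlock()/
MAP_POPULATE path) touches every page of a range, and faulting the guard page
in would grow the stack as a side effect, so it is skipped with -ENOENT just
like the FOLL_MLOCK case above it.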
return ret;
}
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+ struct vm_area_struct *prev = vma->vm_prev;
+
+ /*
+ * Is there a mapping abutting this one below?
+ *
+ * That's only ok if it's the same stack mapping
+ * that has gotten split..
+ */
+ if (prev && prev->vm_end == address)
+ return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+ return expand_downwards(vma, address - PAGE_SIZE);
+ }
+ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+ struct vm_area_struct *next = vma->vm_next;
+
+ /* As VM_GROWSDOWN but s/below/above/ */
+ if (next && next->vm_start == address + PAGE_SIZE)
+ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+ return expand_upwards(vma, address + PAGE_SIZE);
+ }
+ return 0;
+}
+
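Spelled out, the grow-down arm of the restored helper has three outcomes (the
grow-up arm at vm_end is the mirror image):

	/* no prev, or prev->vm_end < vm_start:       expand_downwards() grows
	 *     the stack one page so the guard page stays below the fault;
	 * prev ends at vm_start and is VM_GROWSDOWN: 0 (same split stack);
	 * prev ends at vm_start, anything else:      -ENOMEM, which the fault
	 *     path below turns into VM_FAULT_SIGSEGV.
	 */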
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
+ /* Check if we need to add a guard page to the stack */
+ if (check_stack_guard_page(vma, fe->address) < 0)
+ return VM_FAULT_SIGSEGV;
+
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
}
/* Check against existing mmap mappings. */
- next = find_vma(mm, oldbrk);
- if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
+ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out;
/* Ok, looks good - let it rip. */
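Reading the restored one-liner: find_vma_intersection(mm, start, end) returns
a vma overlapping [start, end), so probing [oldbrk, newbrk + PAGE_SIZE) lets
brk() grow only while one free page still separates the heap from the next
mapping.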
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, prev_end, subtree_gap;
-
- /*
- * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
- * allow two stack_guard_gaps between them here, and when choosing
- * an unmapped area; whereas when expanding we only require one.
- * That's a little inconsistent, but keeps the code here simpler.
- */
- max = vm_start_gap(vma);
- if (vma->vm_prev) {
- prev_end = vm_end_gap(vma->vm_prev);
- if (max > prev_end)
- max -= prev_end;
- else
- max = 0;
- }
+ unsigned long max, subtree_gap;
+ max = vma->vm_start;
+ if (vma->vm_prev)
+ max -= vma->vm_prev->vm_end;
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
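The restored computation is the raw gap back to the predecessor, e.g.:

	/* prev [0x1000, 0x3000), vma [0x8000, 0x9000):
	 * max = 0x8000 - 0x3000 = 0x5000, i.e. five free 4 KiB pages, then
	 * widened below by the subtree gap of either rbtree child if larger.
	 */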
anon_vma_unlock_read(anon_vma);
}
- highest_address = vm_end_gap(vma);
+ highest_address = vma->vm_end;
vma = vma->vm_next;
i++;
}
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vm_end_gap(vma);
+ mm->highest_vm_end = vma->vm_end;
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = vm_end_gap(vma);
+ mm->highest_vm_end = end;
else if (!adjust_next)
vma_gap_update(next);
}
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
- VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
+ VM_WARN_ON(mm->highest_vm_end != end);
}
}
if (insert && file)
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vm_start_gap(vma);
+ gap_end = vma->vm_start;
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
}
}
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vm_end_gap(vma->vm_prev);
- gap_end = vm_start_gap(vma);
+ gap_start = vma->vm_prev->vm_end;
+ gap_end = vma->vm_start;
goto check_current;
}
}
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
check_current:
/* Check if current node has a suitable gap */
- gap_end = vm_start_gap(vma);
+ gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit &&
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vm_end_gap(vma->vm_prev) : 0;
+ vma->vm_prev->vm_end : 0;
goto check_current;
}
}
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev;
+ struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma_prev(mm, addr, &prev);
+ vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma, *prev;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma_prev(mm, addr, &prev);
+ vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma,
- unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start;
+ unsigned long new_start, actual_size;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ actual_size = size;
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+ actual_size -= PAGE_SIZE;
+ if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
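The rlimit test now discounts the guard page, since 'size' includes it for a
growable vma; with hypothetical numbers:

	/* RLIMIT_STACK = 8 MiB, 4 KiB pages, VM_GROWSDOWN:
	 *   size        = 8 MiB + 4 KiB (stack plus its guard page)
	 *   actual_size = size - PAGE_SIZE = 8 MiB -> within the limit,
	 * where comparing the raw size would fail a stack sitting exactly
	 * at RLIMIT_STACK.
	 */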
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *next;
- unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against wrapping around to address 0. */
- address &= PAGE_MASK;
- address += PAGE_SIZE;
- if (!address)
- return -ENOMEM;
-
- /* Enforce stack_guard_gap */
- gap_addr = address + stack_guard_gap;
- if (gap_addr < address)
+ if (address < PAGE_ALIGN(address+4))
+ address = PAGE_ALIGN(address+4);
+ else
return -ENOMEM;
- next = vma->vm_next;
- if (next && next->vm_start < gap_addr) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- /* Check that both stack segments have the same anon_vma? */
- }
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
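The restored wrap check is terse; given the usual PAGE_ALIGN(x) ==
(x + PAGE_SIZE - 1) & PAGE_MASK:

	/* Below the last page, PAGE_ALIGN(address + 4) > address, so the stack
	 * top advances to the next page boundary.  On the last page the sum
	 * wraps, PAGE_ALIGN yields 0, the comparison fails, and we return
	 * -ENOMEM rather than "expand" to address 0.
	 */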
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vm_end_gap(vma);
+ mm->highest_vm_end = address;
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *prev;
- unsigned long gap_addr;
int error;
address &= PAGE_MASK;
if (error)
return error;
- /* Enforce stack_guard_gap */
- gap_addr = address - stack_guard_gap;
- if (gap_addr > address)
- return -ENOMEM;
- prev = vma->vm_prev;
- if (prev && prev->vm_end > gap_addr) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- /* Check that both stack segments have the same anon_vma? */
- }
-
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
return error;
}
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
-static int __init cmdline_parse_stack_guard_gap(char *p)
-{
- unsigned long val;
- char *endptr;
-
- val = simple_strtoul(p, &endptr, 10);
- if (!*endptr)
- stack_guard_gap = val << PAGE_SHIFT;
-
- return 0;
-}
-__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
-
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *next;
+
+ address &= PAGE_MASK;
+ next = vma->vm_next;
+ if (next && next->vm_start == address + PAGE_SIZE) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ }
return expand_upwards(vma, address);
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *prev;
+
+ address &= PAGE_MASK;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end == address) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ }
return expand_downwards(vma, address);
}
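Both restored gates implement the comment above them: a stack split in two
(e.g. by mprotect()) leaves the halves abutting with matching GROWS flags, so
expansion between them proceeds; against any other abutting mapping, -ENOMEM
stops growth one page short, preserving the gap the guard-page fault logic
expects.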
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+ mm->highest_vm_end = prev ? prev->vm_end : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */