[rsbac] patching rejects with linux-2.6.6 + patch-2.6.6-v1.2.3-pre6 + pax-linux-2.6.6-200405182000.patch

sftf at yandex.ru sftf at yandex.ru
Mon Jun 14 08:34:40 CEST 2004


Hello!
  I am encountering patching problems with:
  linux-2.6.6 + patch-2.6.6-v1.2.3-pre6 + pax-linux-2.6.6-200405182000.patch or
  linux-2.6.6 + pax-linux-2.6.6-200405182000.patch + patch-2.6.6-v1.2.3-pre6.

linux-2.6.6 + patch-2.6.6-v1.2.3-pre6 + pax-linux-2.6.6-200405182000.patch patching results:
patching file arch/alpha/kernel/osf_sys.c
patching file arch/alpha/mm/fault.c
patching file arch/i386/Kconfig
patching file arch/i386/kernel/apm.c
patching file arch/i386/kernel/cpu/common.c
patching file arch/i386/kernel/entry.S
patching file arch/i386/kernel/head.S
patching file arch/i386/kernel/ldt.c
patching file arch/i386/kernel/process.c
Hunk #1 succeeded at 365 (offset 17 lines).
Hunk #3 succeeded at 521 (offset 17 lines).
Hunk #5 succeeded at 719 (offset 17 lines).
Hunk #7 succeeded at 820 (offset 17 lines).
patching file arch/i386/kernel/reboot.c
patching file arch/i386/kernel/setup.c
patching file arch/i386/kernel/signal.c
patching file arch/i386/kernel/sys_i386.c
patching file arch/i386/kernel/sysenter.c
patching file arch/i386/kernel/trampoline.S
patching file arch/i386/kernel/traps.c
patching file arch/i386/kernel/vmlinux.lds.S
patching file arch/i386/mm/fault.c
patching file arch/i386/mm/init.c
patching file arch/i386/pci/pcbios.c
patching file arch/ia64/ia32/binfmt_elf32.c
patching file arch/ia64/ia32/ia32priv.h
patching file arch/ia64/ia32/sys_ia32.c
patching file arch/ia64/kernel/sys_ia64.c
patching file arch/ia64/mm/fault.c
patching file arch/mips/kernel/binfmt_elfn32.c
patching file arch/mips/kernel/binfmt_elfo32.c
patching file arch/mips/kernel/syscall.c
patching file arch/mips/mm/fault.c
patching file arch/parisc/kernel/sys_parisc.c
patching file arch/parisc/kernel/sys_parisc32.c
patching file arch/parisc/kernel/traps.c
patching file arch/parisc/mm/fault.c
patching file arch/ppc/kernel/syscalls.c
patching file arch/ppc/mm/fault.c
patching file arch/sparc/kernel/sys_sparc.c
patching file arch/sparc/kernel/sys_sunos.c
patching file arch/sparc/mm/fault.c
patching file arch/sparc/mm/init.c
patching file arch/sparc/mm/srmmu.c
patching file arch/sparc64/kernel/itlb_base.S
patching file arch/sparc64/kernel/sys_sparc.c
patching file arch/sparc64/kernel/sys_sparc32.c
patching file arch/sparc64/kernel/sys_sunos32.c
patching file arch/sparc64/mm/fault.c
patching file arch/sparc64/solaris/misc.c
patching file arch/x86_64/ia32/ia32_binfmt.c
patching file arch/x86_64/ia32/sys_ia32.c
patching file arch/x86_64/kernel/setup64.c
patching file arch/x86_64/kernel/sys_x86_64.c
patching file arch/x86_64/mm/fault.c
patching file drivers/char/mem.c
Hunk #1 succeeded at 416 (offset 5 lines).
patching file drivers/char/random.c
patching file drivers/ide/ide-disk.c
patching file drivers/pnp/pnpbios/bioscalls.c
patching file drivers/scsi/scsi_devinfo.c
patching file drivers/video/vesafb.c
patching file fs/binfmt_aout.c
patching file fs/binfmt_elf.c
patching file fs/binfmt_flat.c
patching file fs/binfmt_misc.c
patching file fs/exec.c
Hunk #1 succeeded at 46 with fuzz 2.
Hunk #2 succeeded at 68 (offset 5 lines).
Hunk #3 succeeded at 383 (offset 65 lines).
Hunk #4 succeeded at 347 (offset 5 lines).
Hunk #5 succeeded at 445 (offset 65 lines).
Hunk #6 succeeded at 448 (offset 5 lines).
Hunk #7 succeeded at 543 (offset 65 lines).
Hunk #8 succeeded at 498 (offset 5 lines).
Hunk #9 succeeded at 586 (offset 65 lines).
Hunk #10 succeeded at 937 (offset 12 lines).
Hunk #11 succeeded at 1268 (offset 72 lines).
Hunk #12 succeeded at 1482 (offset 62 lines).
patching file fs/proc/array.c
Hunk #1 succeeded at 276 (offset 5 lines).
Hunk #2 succeeded at 331 (offset 29 lines).
patching file fs/proc/task_mmu.c
patching file include/asm-alpha/a.out.h
patching file include/asm-alpha/elf.h
patching file include/asm-alpha/mman.h
patching file include/asm-alpha/page.h
patching file include/asm-alpha/pgtable.h
patching file include/asm-i386/a.out.h
patching file include/asm-i386/desc.h
patching file include/asm-i386/elf.h
patching file include/asm-i386/mach-default/apm.h
patching file include/asm-i386/mach-pc9800/apm.h
patching file include/asm-i386/mman.h
patching file include/asm-i386/mmu.h
patching file include/asm-i386/mmu_context.h
patching file include/asm-i386/page.h
patching file include/asm-i386/pgalloc.h
patching file include/asm-i386/pgtable.h
patching file include/asm-i386/processor.h
patching file include/asm-i386/system.h
patching file include/asm-ia64/elf.h
patching file include/asm-ia64/mman.h
patching file include/asm-ia64/page.h
patching file include/asm-ia64/pgtable.h
patching file include/asm-ia64/ustack.h
patching file include/asm-mips/a.out.h
patching file include/asm-mips/elf.h
patching file include/asm-mips/page.h
patching file include/asm-parisc/a.out.h
patching file include/asm-parisc/elf.h
patching file include/asm-parisc/mman.h
patching file include/asm-parisc/page.h
patching file include/asm-parisc/pgtable.h
patching file include/asm-ppc/a.out.h
patching file include/asm-ppc/elf.h
patching file include/asm-ppc/mman.h
patching file include/asm-ppc/page.h
patching file include/asm-ppc/pgtable.h
patching file include/asm-sparc/a.out.h
patching file include/asm-sparc/elf.h
patching file include/asm-sparc/mman.h
patching file include/asm-sparc/page.h
patching file include/asm-sparc/pgtable.h
patching file include/asm-sparc/pgtsrmmu.h
patching file include/asm-sparc/uaccess.h
patching file include/asm-sparc64/a.out.h
patching file include/asm-sparc64/elf.h
patching file include/asm-sparc64/mman.h
patching file include/asm-sparc64/page.h
patching file include/asm-sparc64/pgtable.h
patching file include/asm-x86_64/a.out.h
patching file include/asm-x86_64/elf.h
patching file include/asm-x86_64/mman.h
patching file include/asm-x86_64/page.h
patching file include/asm-x86_64/pgalloc.h
patching file include/asm-x86_64/pgtable.h
patching file include/linux/a.out.h
patching file include/linux/binfmts.h
patching file include/linux/elf.h
patching file include/linux/kernel.h
patching file include/linux/mm.h
patching file include/linux/mman.h
patching file include/linux/random.h
patching file include/linux/sched.h
patching file include/linux/sysctl.h
patching file init/main.c
patching file kernel/fork.c
Hunk #1 succeeded at 284 (offset 6 lines).
patching file kernel/sys.c
Hunk #1 succeeded at 521 (offset 74 lines).
Hunk #3 succeeded at 547 (offset 74 lines).
patching file kernel/sysctl.c
Hunk #1 succeeded at 148 (offset 5 lines).
patching file mm/filemap.c
patching file mm/madvise.c
patching file mm/memory.c
patching file mm/mlock.c
patching file mm/mmap.c
Hunk #1 succeeded at 332 (offset 5 lines).
Hunk #3 succeeded at 548 with fuzz 2 (offset 12 lines).
Hunk #5 succeeded at 611 (offset 12 lines).
Hunk #6 succeeded at 776 (offset 38 lines).
Hunk #7 succeeded at 786 (offset 12 lines).
Hunk #8 succeeded at 1592 with fuzz 2 (offset 770 lines).
Hunk #9 FAILED at 1612.
Hunk #10 FAILED at 1632.
Hunk #11 FAILED at 1660.
Hunk #12 FAILED at 1820.
Hunk #13 FAILED at 1874.
Hunk #14 FAILED at 1916.
Hunk #15 FAILED at 1972.
Hunk #16 FAILED at 2074.
Hunk #17 FAILED at 2144.
Hunk #18 FAILED at 2280.
Hunk #19 FAILED at 2320.
Hunk #20 FAILED at 2342.
Hunk #21 FAILED at 2365.
Hunk #22 FAILED at 2384.
Hunk #23 FAILED at 2401.
Hunk #24 FAILED at 2436.
Hunk #25 FAILED at 2480.
Hunk #26 FAILED at 2510.
Hunk #27 succeeded at 1069 (offset -693 lines).
18 out of 27 hunks FAILED -- saving rejects to file mm/mmap.c.rej
patching file mm/mprotect.c
Hunk #1 succeeded at 17 with fuzz 2.
Hunk #2 succeeded at 162 (offset 5 lines).
Hunk #4 succeeded at 278 (offset 5 lines).
Hunk #6 succeeded at 412 (offset 14 lines).
Hunk #8 succeeded at 478 (offset 14 lines).
Hunk #9 succeeded at 546 with fuzz 1 (offset 39 lines).
patching file mm/mremap.c
patching file security/Kconfig
----------------------------------------------------------------------------------------------
mmap.c.rej
----------------------------------------------------------------------------------------------
***************
*** 1508,1513 ****
  }
  
  EXPORT_SYMBOL(do_mmap_pgoff);
  
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
--- 1612,1618 ----
  }
  
  EXPORT_SYMBOL(do_mmap_pgoff);
+ EXPORT_SYMBOL(__do_mmap_pgoff);
  
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
***************
*** 1527,1537 ****
  {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       unsigned long start_addr;
  
        if (len > TASK_SIZE)
                return -ENOMEM;
  
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
--- 1632,1648 ----
  {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
+       unsigned long start_addr, task_unmapped_base = TASK_UNMAPPED_BASE;
  
        if (len > TASK_SIZE)
                return -ENOMEM;
  
+ #ifdef CONFIG_PAX_RANDMMAP
+       if (current->flags & PF_PAX_RANDMMAP)
+               task_unmapped_base += mm->delta_mmap;
+       if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
+ #endif
+ 
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
***************
*** 1549,1556 ****
                         * Start a new search - just in case we missed
                         * some holes.
                         */
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = addr = TASK_UNMAPPED_BASE;
                                goto full_search;
                        }
                        return -ENOMEM;
--- 1660,1667 ----
                         * Start a new search - just in case we missed
                         * some holes.
                         */
+                       if (start_addr != task_unmapped_base) {
+                               start_addr = addr = task_unmapped_base;
                                goto full_search;
                        }
                        return -ENOMEM;
***************
*** 1709,1718 ****
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
-       
        if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
                        ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                vm_unacct_memory(grow);
                return -ENOMEM;
--- 1820,1870 ----
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+       if (vma->vm_flags & VM_MIRROR) {
+               struct vm_area_struct * vma_m;
+               unsigned long address_m;
+ 
+               address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+               vma_m = find_vma(vma->vm_mm, address_m);
+               if (!vma_m || vma_m->vm_start != address_m ||
+                               !(vma_m->vm_flags & VM_MIRROR) ||
+                               vma->vm_end - vma->vm_start !=
+                               vma_m->vm_end - vma_m->vm_start) {
+                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       vm_unacct_memory(grow);
+                       printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+                              address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+                       return -ENOMEM;
+               }
+ 
+               address_m = address + (unsigned long)vma->vm_private_data;
+               if (address_m - vma_m->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
+                               ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
+                               current->rlim[RLIMIT_AS].rlim_cur ||
+                               ((vma_m->vm_flags & VM_LOCKED) &&
+                               ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
+                               current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       vm_unacct_memory(grow);
+                       return -ENOMEM;
+               }
+ 
+               vma_m->vm_end = address_m;
+               vma_m->vm_mm->total_vm += grow;
+               if (vma_m->vm_flags & VM_LOCKED)
+                       vma_m->vm_mm->locked_vm += grow;
+               track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
+       } else
+ #endif
+ 
        if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
                        ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+                       current->rlim[RLIMIT_AS].rlim_cur ||
+                       ((vma->vm_flags & VM_LOCKED) &&
+                       ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+                       current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                vm_unacct_memory(grow);
                return -ENOMEM;
***************
*** 1722,1727 ****
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow;
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
  }
  
--- 1874,1880 ----
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow;
        spin_unlock(&vma->vm_mm->page_table_lock);
+       track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
        return 0;
  }
  
***************
*** 1763,1772 ****
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
-       
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
                        ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-                       current->rlim[RLIMIT_AS].rlim_cur) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                vm_unacct_memory(grow);
                return -ENOMEM;
--- 1916,1967 ----
                spin_unlock(&vma->vm_mm->page_table_lock);
                return -ENOMEM;
        }
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+       if (vma->vm_flags & VM_MIRROR) {
+               struct vm_area_struct * vma_m;
+               unsigned long address_m;
+ 
+               address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+               vma_m = find_vma(vma->vm_mm, address_m);
+               if (!vma_m || vma_m->vm_start != address_m ||
+                               !(vma_m->vm_flags & VM_MIRROR) ||
+                               vma->vm_end - vma->vm_start !=
+                               vma_m->vm_end - vma_m->vm_start) {
+                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       vm_unacct_memory(grow);
+                       printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+                              address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+                       return -ENOMEM;
+               }
+ 
+               address_m = address + (unsigned long)vma->vm_private_data;
+               if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
+                               ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
+                               current->rlim[RLIMIT_AS].rlim_cur ||
+                               ((vma_m->vm_flags & VM_LOCKED) &&
+                               ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
+                               current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       vm_unacct_memory(grow);
+                       return -ENOMEM;
+               }
+ 
+               vma_m->vm_start = address_m;
+               vma_m->vm_pgoff -= grow;
+               vma_m->vm_mm->total_vm += grow;
+               if (vma_m->vm_flags & VM_LOCKED)
+                       vma_m->vm_mm->locked_vm += grow;
+               track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
+       } else
+ #endif
+ 
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
                        ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+                       current->rlim[RLIMIT_AS].rlim_cur ||
+                       ((vma->vm_flags & VM_LOCKED) &&
+                       ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+                       current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
                spin_unlock(&vma->vm_mm->page_table_lock);
                vm_unacct_memory(grow);
                return -ENOMEM;
***************
*** 1777,1782 ****
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow;
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
  }
  
--- 1972,1978 ----
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow;
        spin_unlock(&vma->vm_mm->page_table_lock);
+       track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
        return 0;
  }
  
***************
*** 1878,1892 ****
  {
        size_t len = area->vm_end - area->vm_start;
  
-       area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        if (area->vm_flags & VM_LOCKED)
-               area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
        /*
         * Is this a new hole at the lowest possible address?
         */
        if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                               area->vm_start < area->vm_mm->free_area_cache)
-             area->vm_mm->free_area_cache = area->vm_start;
  
        remove_shared_vm_struct(area);
  
--- 2074,2088 ----
  {
        size_t len = area->vm_end - area->vm_start;
  
+       mm->total_vm -= len >> PAGE_SHIFT;
        if (area->vm_flags & VM_LOCKED)
+               mm->locked_vm -= len >> PAGE_SHIFT;
        /*
         * Is this a new hole at the lowest possible address?
         */
        if (area->vm_start >= TASK_UNMAPPED_BASE &&
+                               area->vm_start < mm->free_area_cache)
+             mm->free_area_cache = area->vm_start;
  
        remove_shared_vm_struct(area);
  
***************
*** 1948,1968 ****
   */
  static void
  detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
-       struct vm_area_struct *prev, unsigned long end)
  {
        struct vm_area_struct **insertion_point;
        struct vm_area_struct *tail_vma = NULL;
  
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
                tail_vma = vma;
                vma = vma->vm_next;
-       } while (vma && vma->vm_start < end);
        *insertion_point = vma;
        tail_vma->vm_next = NULL;
        mm->mmap_cache = NULL;          /* Kill the cache. */
  }
  
  /*
--- 2144,2216 ----
   */
  static void
  detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+       struct vm_area_struct *prev, unsigned long *start, unsigned long *end)
  {
        struct vm_area_struct **insertion_point;
        struct vm_area_struct *tail_vma = NULL;
  
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+       unsigned long start_m;
+       struct vm_area_struct *vma_m, *head_vma = vma, *mirrors = NULL, *head_vma_m = NULL;
+ #endif
+ 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+               if ((vma->vm_flags & VM_MIRROR) &&
+                   vma->vm_start + (unsigned long)vma->vm_private_data >= *start &&
+                   vma->vm_start + (unsigned long)vma->vm_private_data < *end)
+               {
+                       mm->mmap_cache = NULL;          /* Kill the cache. */
+                       start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+                       vma_m = find_vma(mm, start_m);
+                       if (vma_m && (vma_m->vm_flags & VM_MIRROR) && vma_m->vm_start == start_m) {
+                               vma->vm_flags &= ~VM_MIRROR;
+                               vma_m->vm_flags &= ~VM_MIRROR;
+                       } else
+                               printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, vma->vm_start);
+               }
+ #endif
+ 
                tail_vma = vma;
                vma = vma->vm_next;
+       } while (vma && vma->vm_start < *end);
        *insertion_point = vma;
        tail_vma->vm_next = NULL;
        mm->mmap_cache = NULL;          /* Kill the cache. */
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+       for (; head_vma; head_vma = head_vma->vm_next) {
+               struct vm_area_struct *prev_m;
+ 
+               if (!(head_vma->vm_flags & VM_MIRROR))
+                       continue;
+ 
+               start_m = head_vma->vm_start + (unsigned long)head_vma->vm_private_data;
+               vma_m = find_vma_prev(mm, start_m, &prev_m);
+               rb_erase(&vma_m->vm_rb, &mm->mm_rb);
+               mm->map_count--;
+               insertion_point = prev_m ? &prev_m->vm_next : &mm->mmap;
+               *insertion_point = vma_m->vm_next;
+               if (head_vma_m) {
+                       mirrors->vm_next = vma_m;
+                       mirrors = vma_m;
+               } else
+                       head_vma_m = mirrors = vma_m;
+               mirrors->vm_next = NULL;
+               if (vma_m->vm_start < *start)
+                       *start = vma_m->vm_start;
+               if (vma_m->vm_end > *end)
+                       *end = vma_m->vm_end;
+               mm->mmap_cache = NULL;          /* Kill the cache. */
+       }
+       if (head_vma_m)
+               tail_vma->vm_next = head_vma_m;
+ #endif
+ 
  }
  
  /*
***************
*** 2032,2037 ****
        unsigned long end;
        struct vm_area_struct *mpnt, *prev, *last;
  
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
                return -EINVAL;
  
--- 2280,2289 ----
        unsigned long end;
        struct vm_area_struct *mpnt, *prev, *last;
  
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+       struct vm_area_struct *mpnt_m = NULL, *last_m;
+ #endif
+ 
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
                return -EINVAL;
  
***************
*** 2068,2073 ****
         * places tmp vma above, and higher split_vma places tmp vma below.
         */
        if (start > mpnt->vm_start) {
                if (split_vma(mm, mpnt, start, 0))
                        return -ENOMEM;
                prev = mpnt;
--- 2320,2339 ----
         * places tmp vma above, and higher split_vma places tmp vma below.
         */
        if (start > mpnt->vm_start) {
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+               if (mpnt->vm_flags & VM_MIRROR) {
+                       unsigned long start_m = mpnt->vm_start + (unsigned long)mpnt->vm_private_data;
+ 
+                       mpnt_m = find_vma(mm, start_m);
+                       if (!mpnt_m || (!mpnt_m->vm_flags & VM_MIRROR) || mpnt_m->vm_start != start_m)
+                               return -EINVAL;
+                       start_m = start + (unsigned long)mpnt->vm_private_data;
+                       if (split_vma(mm, mpnt_m, start_m, 0))
+                               return -ENOMEM;
+               }
+ #endif
+ 
                if (split_vma(mm, mpnt, start, 0))
                        return -ENOMEM;
                prev = mpnt;
***************
*** 2076,2081 ****
        /* Does it split the last one? */
        last = find_vma(mm, end);
        if (last && end > last->vm_start) {
                if (split_vma(mm, last, end, 1))
                        return -ENOMEM;
        }
--- 2342,2361 ----
        /* Does it split the last one? */
        last = find_vma(mm, end);
        if (last && end > last->vm_start) {
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+               if (last->vm_flags & VM_MIRROR) {
+                       unsigned long end_m = last->vm_start + (unsigned long)last->vm_private_data;
+ 
+                       last_m = find_vma(mm, end_m);
+                       if (!last_m || (!last_m->vm_flags & VM_MIRROR) || last_m->vm_start != end_m)
+                               return -EINVAL;
+                       end_m = end + (unsigned long)last->vm_private_data;
+                       if (split_vma(mm, last_m, end_m, 1))
+                               return -ENOMEM;
+               }
+ #endif
+ 
                if (split_vma(mm, last, end, 1))
                        return -ENOMEM;
        }
***************
*** 2085,2097 ****
         * Remove the vma's, and unmap the actual pages
         */
        spin_lock(&mm->page_table_lock);
-       detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
        unmap_region(mm, mpnt, prev, start, end);
        spin_unlock(&mm->page_table_lock);
  
        /* Fix up all other VM information */
        unmap_vma_list(mm, mpnt);
  
        return 0;
  }
  
--- 2365,2379 ----
         * Remove the vma's, and unmap the actual pages
         */
        spin_lock(&mm->page_table_lock);
+       detach_vmas_to_be_unmapped(mm, mpnt, prev, &start, &end);
        unmap_region(mm, mpnt, prev, start, end);
        spin_unlock(&mm->page_table_lock);
  
        /* Fix up all other VM information */
        unmap_vma_list(mm, mpnt);
  
+       track_exec_limit(mm, start, end, 0UL);
+ 
        return 0;
  }
  
***************
*** 2102,2107 ****
        int ret;
        struct mm_struct *mm = current->mm;
  
        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, addr, len);
        up_write(&mm->mmap_sem);
--- 2384,2395 ----
        int ret;
        struct mm_struct *mm = current->mm;
  
+ #ifdef CONFIG_PAX_SEGMEXEC
+       if ((current->flags & PF_PAX_SEGMEXEC) &&
+           (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
+               return -EINVAL;
+ #endif
+ 
        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, addr, len);
        up_write(&mm->mmap_sem);
***************
*** 2113,2119 ****
   *  anonymous maps.  eventually we may be able to do some
   *  brk-specific accounting here.
   */
  unsigned long do_brk(unsigned long addr, unsigned long len)
  {
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
--- 2401,2431 ----
   *  anonymous maps.  eventually we may be able to do some
   *  brk-specific accounting here.
   */
+ #if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
+ unsigned long __do_brk(unsigned long addr, unsigned long len);
+ 
+ unsigned long do_brk(unsigned long addr, unsigned long len)
+ {
+       unsigned long ret;
+ 
+       ret = __do_brk(addr, len);
+       if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) {
+               unsigned long ret_m;
+ 
+               ret_m = __do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
+               if (ret_m > TASK_SIZE) {
+                       do_munmap(current->mm, addr, len);
+                       ret = ret_m;
+               }
+       }
+ 
+       return ret;
+ }
+ 
+ unsigned long __do_brk(unsigned long addr, unsigned long len)
+ #else
  unsigned long do_brk(unsigned long addr, unsigned long len)
+ #endif
  {
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
***************
*** 2124,2129 ****
        if (!len)
                return addr;
  
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
  
--- 2436,2448 ----
        if (!len)
                return addr;
  
+ #ifdef CONFIG_PAX_SEGMEXEC
+       if (current->flags & PF_PAX_SEGMEXEC) {
+               if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
+                       return -EINVAL;
+       } else
+ #endif
+ 
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
  
***************
*** 2161,2166 ****
  
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
        /* Can we just expand an old anonymous mapping? */
        if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
                                        flags, NULL, 0))
--- 2480,2497 ----
  
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
+ #if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+       if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
+               flags &= ~VM_EXEC;
+ 
+ #ifdef CONFIG_PAX_MPROTECT
+               if (current->flags & PF_PAX_MPROTECT)
+                       flags &= ~VM_MAYEXEC;
+ #endif
+ 
+       }
+ #endif
+ 
        /* Can we just expand an old anonymous mapping? */
        if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
                                        flags, NULL, 0))
***************
*** 2179,2184 ****
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = flags;
        vma->vm_page_prot = protection_map[flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_pgoff = 0;
--- 2510,2522 ----
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = flags;
+ 
+ #ifdef CONFIG_PAX_PAGEEXEC
+       if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
+               vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
+       else
+ #endif
+ 
        vma->vm_page_prot = protection_map[flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_pgoff = 0; 
Thanks!
mailto:sftf at yandex.ru



More information about the rsbac mailing list