[rsbac] Hunk #4 FAILED at 793 == [ PaX afterwards ] ==

Roy Lanek lanek at novenine.com
Tue May 25 13:17:55 CEST 2004


> Could you please also test whether it works when patching RSBAC in first 
> and PaX afterwards? This is what I usually do myself.
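
Did that; roughly this sequence (a minimal sketch, the RSBAC patch
file name below is assumed, the PaX snapshot is the most recent one):

	cd linux-2.6.6
	patch -p1 < ../rsbac-v1.2.3-linux-2.6.6.diff    # RSBAC first (name assumed)
	patch -p1 < ../pax-linux-2.6.6-200405182000.patch    # PaX afterwards
	# failed hunks are saved next to the sources as *.rej
	find . -name '*.rej'

For mm/mmap.c, the second patch run reports: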

Hunk #9 FAILED at 1612.
Hunk #10 FAILED at 1632.
Hunk #11 FAILED at 1660.
Hunk #12 FAILED at 1820.
Hunk #13 FAILED at 1874.
Hunk #14 FAILED at 1916.
Hunk #15 FAILED at 1972.
Hunk #16 FAILED at 2074.
Hunk #17 FAILED at 2144.
Hunk #18 FAILED at 2280.
Hunk #19 FAILED at 2320.
Hunk #20 FAILED at 2342.
Hunk #21 FAILED at 2365.
Hunk #22 FAILED at 2384.
Hunk #23 FAILED at 2401.
Hunk #24 FAILED at 2436.
Hunk #25 FAILED at 2480.
Hunk #26 FAILED at 2510.

18 out of 27 hunks FAILED -- saving rejects to file mm/mmap.c.rej

See the attachment. (Patching PaX first gives the same result as
before: very likely, I believe, because the diff between the two
PaX patches, and I am using the most recent one, is "too tiny" to
matter from RSBAC's perspective; as the diff below shows, it is
confined to the TASK_UNMAPPED_BASE handling in
include/asm-i386/processor.h. At a minimum, the `Hunk #N FAILED at
some-line' messages correspond.)
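
(A quick way to check that correspondence, sketched here: in a
context-format reject file every hunk starts with a `*** start,end ****'
line, so

	grep '^\*\*\* [0-9]' mm/mmap.c.rej

lists the hunk boundaries of each reject file for side-by-side
comparison.)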

diff pax-linux-2.6.6-200405172250.patch \
		pax-linux-2.6.6-200405182000.patch

5807c5807
< +++ linux-2.6.6-pax/include/asm-i386/processor.h	2004-05-15 11:39:23.000000000 +0200
---
> +++ linux-2.6.6-pax/include/asm-i386/processor.h	2004-05-18 16:55:01.590173880 +0200
5817c5817
< @@ -296,10 +296,19 @@ extern unsigned int mca_pentium_flag;
---
> @@ -296,10 +296,23 @@ extern unsigned int mca_pentium_flag;
5829c5829
< +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
---
> +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
5830a5831,5834
> +#elif defined(CONFIG_PAX_PAGEEXEC)
> +#define TASK_UNMAPPED_BASE	(PAGE_ALIGN((current->flags & PF_PAX_PAGEEXEC)? 0x00110000UL : TASK_SIZE/3))
> +#elif defined(CONFIG_PAX_SEGMEXEC)
> +#define TASK_UNMAPPED_BASE	(PAGE_ALIGN((current->flags & PF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3 : TASK_SIZE/3))
5837c5841
< @@ -493,16 +502,12 @@ void show_trace(struct task_struct *task
---
> @@ -493,16 +506,12 @@ void show_trace(struct task_struct *task
5856c5860
< @@ -626,7 +631,7 @@ static inline void rep_nop(void)
---
> @@ -626,7 +635,7 @@ static inline void rep_nop(void)
5865c5869
< @@ -640,7 +645,7 @@ extern inline void prefetch(const void *
---
> @@ -640,7 +649,7 @@ extern inline void prefetch(const void *
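
(That the change really is confined to include/asm-i386/processor.h
can be double-checked by filtering the diff of the two patches down
to the changed `+++' file headers, e.g.:

	diff pax-linux-2.6.6-200405172250.patch \
			pax-linux-2.6.6-200405182000.patch | grep '^[<>] +++'

which prints only the two processor.h lines above.)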



/Roy

-------------- next part --------------
***************
*** 1508,1513 ****
  }
  
  EXPORT_SYMBOL(do_mmap_pgoff);
  
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
--- 1612,1618 ----
  }
  
  EXPORT_SYMBOL(do_mmap_pgoff);
+ EXPORT_SYMBOL(__do_mmap_pgoff);
  
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
***************
*** 1527,1537 ****
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
- 	unsigned long start_addr;
  
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(mm, addr);
--- 1632,1648 ----
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
+ 	unsigned long start_addr, task_unmapped_base = TASK_UNMAPPED_BASE;
  
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
+ #ifdef CONFIG_PAX_RANDMMAP
+ 	if (current->flags & PF_PAX_RANDMMAP)
+ 		task_unmapped_base += mm->delta_mmap;
+ 	if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
+ #endif
+ 
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(mm, addr);
***************
*** 1549,1556 ****
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
- 			if (start_addr != TASK_UNMAPPED_BASE) {
- 				start_addr = addr = TASK_UNMAPPED_BASE;
  				goto full_search;
  			}
  			return -ENOMEM;
--- 1660,1667 ----
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
+ 			if (start_addr != task_unmapped_base) {
+ 				start_addr = addr = task_unmapped_base;
  				goto full_search;
  			}
  			return -ENOMEM;
***************
*** 1709,1718 ****
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		return -ENOMEM;
  	}
- 	
  	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
  			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
- 			current->rlim[RLIMIT_AS].rlim_cur) {
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		vm_unacct_memory(grow);
  		return -ENOMEM;
--- 1820,1870 ----
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		return -ENOMEM;
  	}
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 	if (vma->vm_flags & VM_MIRROR) {
+ 		struct vm_area_struct * vma_m;
+ 		unsigned long address_m;
+ 
+ 		address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+ 		vma_m = find_vma(vma->vm_mm, address_m);
+ 		if (!vma_m || vma_m->vm_start != address_m ||
+ 				!(vma_m->vm_flags & VM_MIRROR) ||
+ 				vma->vm_end - vma->vm_start !=
+ 				vma_m->vm_end - vma_m->vm_start) {
+ 			spin_unlock(&vma->vm_mm->page_table_lock);
+ 			vm_unacct_memory(grow);
+ 			printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+ 			       address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		address_m = address + (unsigned long)vma->vm_private_data;
+ 		if (address_m - vma_m->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
+ 				((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
+ 				current->rlim[RLIMIT_AS].rlim_cur ||
+ 				((vma_m->vm_flags & VM_LOCKED) &&
+ 				((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
+ 				current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ 			spin_unlock(&vma->vm_mm->page_table_lock);
+ 			vm_unacct_memory(grow);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		vma_m->vm_end = address_m;
+ 		vma_m->vm_mm->total_vm += grow;
+ 		if (vma_m->vm_flags & VM_LOCKED)
+ 			vma_m->vm_mm->locked_vm += grow;
+ 		track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
+ 	} else
+ #endif
+ 
  	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
  			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+ 			current->rlim[RLIMIT_AS].rlim_cur ||
+ 			((vma->vm_flags & VM_LOCKED) &&
+ 			((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+ 			current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		vm_unacct_memory(grow);
  		return -ENOMEM;
***************
*** 1722,1727 ****
  	if (vma->vm_flags & VM_LOCKED)
  		vma->vm_mm->locked_vm += grow;
  	spin_unlock(&vma->vm_mm->page_table_lock);
  	return 0;
  }
  
--- 1874,1880 ----
  	if (vma->vm_flags & VM_LOCKED)
  		vma->vm_mm->locked_vm += grow;
  	spin_unlock(&vma->vm_mm->page_table_lock);
+ 	track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
  	return 0;
  }
  
***************
*** 1763,1772 ****
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		return -ENOMEM;
  	}
- 	
  	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
  			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
- 			current->rlim[RLIMIT_AS].rlim_cur) {
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		vm_unacct_memory(grow);
  		return -ENOMEM;
--- 1916,1967 ----
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		return -ENOMEM;
  	}
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 	if (vma->vm_flags & VM_MIRROR) {
+ 		struct vm_area_struct * vma_m;
+ 		unsigned long address_m;
+ 
+ 		address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+ 		vma_m = find_vma(vma->vm_mm, address_m);
+ 		if (!vma_m || vma_m->vm_start != address_m ||
+ 				!(vma_m->vm_flags & VM_MIRROR) ||
+ 				vma->vm_end - vma->vm_start !=
+ 				vma_m->vm_end - vma_m->vm_start) {
+ 			spin_unlock(&vma->vm_mm->page_table_lock);
+ 			vm_unacct_memory(grow);
+ 			printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+ 			       address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		address_m = address + (unsigned long)vma->vm_private_data;
+ 		if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
+ 				((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
+ 				current->rlim[RLIMIT_AS].rlim_cur ||
+ 				((vma_m->vm_flags & VM_LOCKED) &&
+ 				((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
+ 				current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ 			spin_unlock(&vma->vm_mm->page_table_lock);
+ 			vm_unacct_memory(grow);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		vma_m->vm_start = address_m;
+ 		vma_m->vm_pgoff -= grow;
+ 		vma_m->vm_mm->total_vm += grow;
+ 		if (vma_m->vm_flags & VM_LOCKED)
+ 			vma_m->vm_mm->locked_vm += grow;
+ 		track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
+ 	} else
+ #endif
+ 
  	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
  			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+ 			current->rlim[RLIMIT_AS].rlim_cur ||
+ 			((vma->vm_flags & VM_LOCKED) &&
+ 			((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+ 			current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
  		spin_unlock(&vma->vm_mm->page_table_lock);
  		vm_unacct_memory(grow);
  		return -ENOMEM;
***************
*** 1777,1782 ****
  	if (vma->vm_flags & VM_LOCKED)
  		vma->vm_mm->locked_vm += grow;
  	spin_unlock(&vma->vm_mm->page_table_lock);
  	return 0;
  }
  
--- 1972,1978 ----
  	if (vma->vm_flags & VM_LOCKED)
  		vma->vm_mm->locked_vm += grow;
  	spin_unlock(&vma->vm_mm->page_table_lock);
+ 	track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
  	return 0;
  }
  
***************
*** 1878,1892 ****
  {
  	size_t len = area->vm_end - area->vm_start;
  
- 	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
  	if (area->vm_flags & VM_LOCKED)
- 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
  	/*
  	 * Is this a new hole at the lowest possible address?
  	 */
  	if (area->vm_start >= TASK_UNMAPPED_BASE &&
- 				area->vm_start < area->vm_mm->free_area_cache)
- 	      area->vm_mm->free_area_cache = area->vm_start;
  
  	remove_shared_vm_struct(area);
  
--- 2074,2088 ----
  {
  	size_t len = area->vm_end - area->vm_start;
  
+ 	mm->total_vm -= len >> PAGE_SHIFT;
  	if (area->vm_flags & VM_LOCKED)
+ 		mm->locked_vm -= len >> PAGE_SHIFT;
  	/*
  	 * Is this a new hole at the lowest possible address?
  	 */
  	if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ 				area->vm_start < mm->free_area_cache)
+ 	      mm->free_area_cache = area->vm_start;
  
  	remove_shared_vm_struct(area);
  
***************
*** 1948,1968 ****
   */
  static void
  detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
- 	struct vm_area_struct *prev, unsigned long end)
  {
  	struct vm_area_struct **insertion_point;
  	struct vm_area_struct *tail_vma = NULL;
  
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	do {
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
  		vma = vma->vm_next;
- 	} while (vma && vma->vm_start < end);
  	*insertion_point = vma;
  	tail_vma->vm_next = NULL;
  	mm->mmap_cache = NULL;		/* Kill the cache. */
  }
  
  /*
--- 2144,2216 ----
   */
  static void
  detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	struct vm_area_struct *prev, unsigned long *start, unsigned long *end)
  {
  	struct vm_area_struct **insertion_point;
  	struct vm_area_struct *tail_vma = NULL;
  
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 	unsigned long start_m;
+ 	struct vm_area_struct *vma_m, *head_vma = vma, *mirrors = NULL, *head_vma_m = NULL;
+ #endif
+ 
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	do {
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 		if ((vma->vm_flags & VM_MIRROR) &&
+ 		    vma->vm_start + (unsigned long)vma->vm_private_data >= *start &&
+ 		    vma->vm_start + (unsigned long)vma->vm_private_data < *end)
+ 		{
+ 			mm->mmap_cache = NULL;		/* Kill the cache. */
+ 			start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+ 			vma_m = find_vma(mm, start_m);
+ 			if (vma_m && (vma_m->vm_flags & VM_MIRROR) && vma_m->vm_start == start_m) {
+ 				vma->vm_flags &= ~VM_MIRROR;
+ 				vma_m->vm_flags &= ~VM_MIRROR;
+ 			} else
+ 				printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, vma->vm_start);
+ 		}
+ #endif
+ 
  		tail_vma = vma;
  		vma = vma->vm_next;
+ 	} while (vma && vma->vm_start < *end);
  	*insertion_point = vma;
  	tail_vma->vm_next = NULL;
  	mm->mmap_cache = NULL;		/* Kill the cache. */
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 	for (; head_vma; head_vma = head_vma->vm_next) {
+ 		struct vm_area_struct *prev_m;
+ 
+ 		if (!(head_vma->vm_flags & VM_MIRROR))
+ 			continue;
+ 
+ 		start_m = head_vma->vm_start + (unsigned long)head_vma->vm_private_data;
+ 		vma_m = find_vma_prev(mm, start_m, &prev_m);
+ 		rb_erase(&vma_m->vm_rb, &mm->mm_rb);
+ 		mm->map_count--;
+ 		insertion_point = prev_m ? &prev_m->vm_next : &mm->mmap;
+ 		*insertion_point = vma_m->vm_next;
+ 		if (head_vma_m) {
+ 			mirrors->vm_next = vma_m;
+ 			mirrors = vma_m;
+ 		} else
+ 			head_vma_m = mirrors = vma_m;
+ 		mirrors->vm_next = NULL;
+ 		if (vma_m->vm_start < *start)
+ 			*start = vma_m->vm_start;
+ 		if (vma_m->vm_end > *end)
+ 			*end = vma_m->vm_end;
+ 		mm->mmap_cache = NULL;		/* Kill the cache. */
+ 	}
+ 	if (head_vma_m)
+ 		tail_vma->vm_next = head_vma_m;
+ #endif
+ 
  }
  
  /*
***************
*** 2032,2037 ****
  	unsigned long end;
  	struct vm_area_struct *mpnt, *prev, *last;
  
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
--- 2280,2289 ----
  	unsigned long end;
  	struct vm_area_struct *mpnt, *prev, *last;
  
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 	struct vm_area_struct *mpnt_m = NULL, *last_m;
+ #endif
+ 
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
***************
*** 2068,2073 ****
  	 * places tmp vma above, and higher split_vma places tmp vma below.
  	 */
  	if (start > mpnt->vm_start) {
  		if (split_vma(mm, mpnt, start, 0))
  			return -ENOMEM;
  		prev = mpnt;
--- 2320,2339 ----
  	 * places tmp vma above, and higher split_vma places tmp vma below.
  	 */
  	if (start > mpnt->vm_start) {
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 		if (mpnt->vm_flags & VM_MIRROR) {
+ 			unsigned long start_m = mpnt->vm_start + (unsigned long)mpnt->vm_private_data;
+ 
+ 			mpnt_m = find_vma(mm, start_m);
+ 			if (!mpnt_m || (!mpnt_m->vm_flags & VM_MIRROR) || mpnt_m->vm_start != start_m)
+ 				return -EINVAL;
+ 			start_m = start + (unsigned long)mpnt->vm_private_data;
+ 			if (split_vma(mm, mpnt_m, start_m, 0))
+ 				return -ENOMEM;
+ 		}
+ #endif
+ 
  		if (split_vma(mm, mpnt, start, 0))
  			return -ENOMEM;
  		prev = mpnt;
***************
*** 2076,2081 ****
  	/* Does it split the last one? */
  	last = find_vma(mm, end);
  	if (last && end > last->vm_start) {
  		if (split_vma(mm, last, end, 1))
  			return -ENOMEM;
  	}
--- 2342,2361 ----
  	/* Does it split the last one? */
  	last = find_vma(mm, end);
  	if (last && end > last->vm_start) {
+ 
+ #if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
+ 		if (last->vm_flags & VM_MIRROR) {
+ 			unsigned long end_m = last->vm_start + (unsigned long)last->vm_private_data;
+ 
+ 			last_m = find_vma(mm, end_m);
+ 			if (!last_m || (!last_m->vm_flags & VM_MIRROR) || last_m->vm_start != end_m)
+ 				return -EINVAL;
+ 			end_m = end + (unsigned long)last->vm_private_data;
+ 			if (split_vma(mm, last_m, end_m, 1))
+ 				return -ENOMEM;
+ 		}
+ #endif
+ 
  		if (split_vma(mm, last, end, 1))
  			return -ENOMEM;
  	}
***************
*** 2085,2097 ****
  	 * Remove the vma's, and unmap the actual pages
  	 */
  	spin_lock(&mm->page_table_lock);
- 	detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
  	unmap_region(mm, mpnt, prev, start, end);
  	spin_unlock(&mm->page_table_lock);
  
  	/* Fix up all other VM information */
  	unmap_vma_list(mm, mpnt);
  
  	return 0;
  }
  
--- 2365,2379 ----
  	 * Remove the vma's, and unmap the actual pages
  	 */
  	spin_lock(&mm->page_table_lock);
+ 	detach_vmas_to_be_unmapped(mm, mpnt, prev, &start, &end);
  	unmap_region(mm, mpnt, prev, start, end);
  	spin_unlock(&mm->page_table_lock);
  
  	/* Fix up all other VM information */
  	unmap_vma_list(mm, mpnt);
  
+ 	track_exec_limit(mm, start, end, 0UL);
+ 
  	return 0;
  }
  
***************
*** 2102,2107 ****
  	int ret;
  	struct mm_struct *mm = current->mm;
  
  	down_write(&mm->mmap_sem);
  	ret = do_munmap(mm, addr, len);
  	up_write(&mm->mmap_sem);
--- 2384,2395 ----
  	int ret;
  	struct mm_struct *mm = current->mm;
  
+ #ifdef CONFIG_PAX_SEGMEXEC
+ 	if ((current->flags & PF_PAX_SEGMEXEC) &&
+ 	    (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
+ 		return -EINVAL;
+ #endif
+ 
  	down_write(&mm->mmap_sem);
  	ret = do_munmap(mm, addr, len);
  	up_write(&mm->mmap_sem);
***************
*** 2113,2119 ****
   *  anonymous maps.  eventually we may be able to do some
   *  brk-specific accounting here.
   */
  unsigned long do_brk(unsigned long addr, unsigned long len)
  {
  	struct mm_struct * mm = current->mm;
  	struct vm_area_struct * vma, * prev;
--- 2401,2431 ----
   *  anonymous maps.  eventually we may be able to do some
   *  brk-specific accounting here.
   */
+ #if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
+ unsigned long __do_brk(unsigned long addr, unsigned long len);
+ 
+ unsigned long do_brk(unsigned long addr, unsigned long len)
+ {
+ 	unsigned long ret;
+ 
+ 	ret = __do_brk(addr, len);
+ 	if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) {
+ 		unsigned long ret_m;
+ 
+ 		ret_m = __do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
+ 		if (ret_m > TASK_SIZE) {
+ 			do_munmap(current->mm, addr, len);
+ 			ret = ret_m;
+ 		}
+ 	}
+ 
+ 	return ret;
+ }
+ 
+ unsigned long __do_brk(unsigned long addr, unsigned long len)
+ #else
  unsigned long do_brk(unsigned long addr, unsigned long len)
+ #endif
  {
  	struct mm_struct * mm = current->mm;
  	struct vm_area_struct * vma, * prev;
***************
*** 2124,2129 ****
  	if (!len)
  		return addr;
  
  	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
  		return -EINVAL;
  
--- 2436,2448 ----
  	if (!len)
  		return addr;
  
+ #ifdef CONFIG_PAX_SEGMEXEC
+ 	if (current->flags & PF_PAX_SEGMEXEC) {
+ 		if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
+ 			return -EINVAL;
+ 	} else
+ #endif
+ 
  	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
  		return -EINVAL;
  
***************
*** 2161,2166 ****
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
  	/* Can we just expand an old anonymous mapping? */
  	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
  					flags, NULL, 0))
--- 2480,2497 ----
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
+ #if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+ 	if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
+ 		flags &= ~VM_EXEC;
+ 
+ #ifdef CONFIG_PAX_MPROTECT
+ 		if (current->flags & PF_PAX_MPROTECT)
+ 			flags &= ~VM_MAYEXEC;
+ #endif
+ 
+ 	}
+ #endif
+ 
  	/* Can we just expand an old anonymous mapping? */
  	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
  					flags, NULL, 0))
***************
*** 2179,2184 ****
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  	vma->vm_flags = flags;
  	vma->vm_page_prot = protection_map[flags & 0x0f];
  	vma->vm_ops = NULL;
  	vma->vm_pgoff = 0;
--- 2510,2522 ----
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  	vma->vm_flags = flags;
+ 
+ #ifdef CONFIG_PAX_PAGEEXEC
+ 	if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
+ 		vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
+ 	else
+ #endif
+ 
  	vma->vm_page_prot = protection_map[flags & 0x0f];
  	vma->vm_ops = NULL;
  	vma->vm_pgoff = 0;

