diff -urN /md0/kernels/2.4/v2.4.9/include/linux/mm.h foo/include/linux/mm.h
--- /md0/kernels/2.4/v2.4.9/include/linux/mm.h	Tue Aug  7 17:52:06 2001
+++ foo/include/linux/mm.h	Thu Aug 16 16:59:34 2001
@@ -515,6 +515,7 @@
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
 
 extern unsigned long do_brk(unsigned long, unsigned long);
+extern void merge_anon_vmas(struct mm_struct *mm, unsigned long start, unsigned long end);
 
 struct zone_t;
 /* filemap.c */
diff -urN /md0/kernels/2.4/v2.4.9/mm/mmap.c foo/mm/mmap.c
--- /md0/kernels/2.4/v2.4.9/mm/mmap.c	Fri May 25 22:48:10 2001
+++ foo/mm/mmap.c	Thu Aug 16 16:59:35 2001
@@ -17,6 +17,8 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+static inline void attempt_merge_next(struct mm_struct *mm, struct vm_area_struct *vma);
+
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
  * behavior is in parens:
@@ -309,7 +311,7 @@
 
 	/* Can we just expand an old anonymous mapping? */
 	if (addr && !file && !(vm_flags & VM_SHARED)) {
-		struct vm_area_struct * vma = find_vma(mm, addr-1);
+		vma = find_vma(mm, addr-1);
 		if (vma && vma->vm_end == addr && !vma->vm_file && 
 		    vma->vm_flags == vm_flags) {
 			vma->vm_end = addr + len;
@@ -365,12 +367,17 @@
 	if (correct_wcount)
 		atomic_inc(&file->f_dentry->d_inode->i_writecount);
 
-out:	
+out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += len >> PAGE_SHIFT;
 		make_pages_present(addr, addr + len);
 	}
+
+	/* Can we merge this anonymous mapping with the one following it? */
+	if (!file && !(vm_flags & VM_SHARED))
+		attempt_merge_next(mm, vma);
+
 	return addr;
 
 unmap_and_free_vma:
@@ -1004,4 +1011,34 @@
 	__insert_vm_struct(mm, vmp);
 	spin_unlock(&current->mm->page_table_lock);
 	unlock_vma_mappings(vmp);
+}
+
+static inline void attempt_merge_next(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+	struct vm_area_struct *next = vma->vm_next;
+	if (next && vma->vm_end == next->vm_start && !next->vm_file && 
+	    vma->vm_flags == next->vm_flags) {
+		spin_lock(&mm->page_table_lock);
+		vma->vm_next = next->vm_next;
+		if (mm->mmap_avl)
+			avl_remove(next, &mm->mmap_avl);
+		vma->vm_end = next->vm_end;
+		mm->mmap_cache = vma;	/* Kill the cache. */
+		mm->map_count--;
+		spin_unlock(&mm->page_table_lock);
+
+		kmem_cache_free(vm_area_cachep, next);
+	}
+}
+
+void merge_anon_vmas(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct vm_area_struct *vma;
+	if (start)
+		start--;
+
+	for (vma = find_vma(mm, start); vma && vma->vm_start <= end;
+	     vma = vma->vm_next)
+		if (!vma->vm_file && !(vma->vm_flags & VM_SHARED))
+			attempt_merge_next(mm, vma);
 }
diff -urN /md0/kernels/2.4/v2.4.9/mm/mprotect.c foo/mm/mprotect.c
--- /md0/kernels/2.4/v2.4.9/mm/mprotect.c	Thu Apr  5 11:53:46 2001
+++ foo/mm/mprotect.c	Thu Aug 16 16:59:35 2001
@@ -278,6 +278,7 @@
 			break;
 		}
 	}
+	merge_anon_vmas(current->mm, start, end);
 out:
 	up_write(&current->mm->mmap_sem);
 	return error;
diff -urN /md0/kernels/2.4/v2.4.9/mm/mremap.c foo/mm/mremap.c
--- /md0/kernels/2.4/v2.4.9/mm/mremap.c	Thu May  3 11:22:20 2001
+++ foo/mm/mremap.c	Thu Aug 16 16:59:35 2001
@@ -128,10 +128,23 @@
 	unsigned long new_addr)
 {
 	struct vm_area_struct * new_vma;
+	int allocated_vma = 0;
 
-	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-	if (new_vma) {
-		if (!move_page_tables(current-&gt;mm, new_addr, addr, old_len)) {
+	/* First, check if we can merge a mapping. -ben */
+	new_vma = find_vma(current->mm, new_addr-1);
+	if (new_vma && !vma->vm_file && !(vma->vm_flags & VM_SHARED) &&
+	    new_vma->vm_end == new_addr && !new_vma->vm_file && 
+		new_vma->vm_flags == vma->vm_flags) {
+		new_vma->vm_end = new_addr + new_len;
+	} else {
+		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		if (!new_vma)
+			goto no_mem;
+		allocated_vma = 1;
+	}
+
+	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
+		if (allocated_vma) {
 			*new_vma = *vma;
 			new_vma->vm_start = new_addr;
 			new_vma->vm_end = new_addr+new_len;
@@ -142,17 +155,20 @@
 			if (new_vma->vm_ops && new_vma->vm_ops->open)
 				new_vma->vm_ops->open(new_vma);
 			insert_vm_struct(current->mm, new_vma);
-			do_munmap(current->mm, addr, old_len);
-			current->mm->total_vm += new_len >> PAGE_SHIFT;
-			if (new_vma->vm_flags & VM_LOCKED) {
-				current->mm->locked_vm += new_len >> PAGE_SHIFT;
-				make_pages_present(new_vma->vm_start,
-						   new_vma->vm_end);
-			}
-			return new_addr;
 		}
-		kmem_cache_free(vm_area_cachep, new_vma);
+		do_munmap(current->mm, addr, old_len);
+		current->mm->total_vm += new_len >> PAGE_SHIFT;
+		if (new_vma->vm_flags & VM_LOCKED) {
+			current->mm->locked_vm += new_len >> PAGE_SHIFT;
+			make_pages_present(new_vma->vm_start,
+					   new_vma->vm_end);
+		}
+		return new_addr;
 	}
+	if (allocated_vma)
+		kmem_cache_free(vm_area_cachep, new_vma);
+
+no_mem:
 	return -ENOMEM;
 }
 