diff -urN linux-2.5.8-pre2/mm/memory.c linux/mm/memory.c
--- linux-2.5.8-pre2/mm/memory.c	Fri Apr  5 20:18:12 2002
+++ linux/mm/memory.c	Mon Apr  8 01:03:20 2002
@@ -397,50 +397,69 @@
 	return freed;
 }
 
-/*
- * remove user pages in a given range.
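+/* unmap at most this many bytes (256 pages) per page_table_lock hold */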
+#define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)
+
+/**
+ * zap_page_range - remove user pages in a given range
+ * @vma: vm_area_struct containing the pages to unmap
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	mmu_gather_t *tlb;
 	pgd_t * dir;
-	unsigned long start = address, end = address + size;
-	int freed = 0;
-
-	dir = pgd_offset(mm, address);
+	unsigned long start, end, addr, block;
+	int freed;
 
-	/*
-	 * This is a long-lived spinlock. That's fine.
-	 * There's no contention, because the page table
-	 * lock only protects against kswapd anyway, and
-	 * even if kswapd happened to be looking at this
-	 * process we _want_ it to get stuck.
-	 */
-	if (address >= end)
-		BUG();
-	spin_lock(&mm->page_table_lock);
-	flush_cache_range(vma, address, end);
-	tlb = tlb_gather_mmu(vma);
-
-	do {
-		freed += zap_pmd_range(tlb, dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-
-	/* this will flush any remaining tlb entries */
-	tlb_finish_mmu(tlb, start, end);
-
-	/*
-	 * Update rss for the mm_struct (not necessarily current->mm)
-	 * Notice that rss is an unsigned long.
-	 */
-	if (mm->rss > freed)
-		mm->rss -= freed;
-	else
-		mm->rss = 0;
-	spin_unlock(&mm->page_table_lock);
+	/* break the work up into blocks of ZAP_BLOCK_SIZE bytes */
+	while (size) {
+		if (size > ZAP_BLOCK_SIZE)
+			block = ZAP_BLOCK_SIZE;
+		else
+			block = size;
+
+		freed = 0;
+		start = addr = address;
+		end = address + block;
+		dir = pgd_offset(mm, address);
+
+		BUG_ON(address >= end);
+
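+		/*
+		 * Take and drop the page_table_lock once per block so it
+		 * is never held across the entire range.
+		 */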
+		spin_lock(&mm->page_table_lock);
+		flush_cache_range(vma, start, end);
+		tlb = tlb_gather_mmu(vma);
+
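+		/*
+		 * Walk the block one pgd entry at a time; addr wraps to
+		 * zero past the top of the address space, ending the loop.
+		 */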
+		do {
+			freed += zap_pmd_range(tlb, dir, addr, end - addr);
+			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
+			dir++;
+		} while (addr && (addr < end));
+
+		/* this will flush any remaining tlb entries */
+		tlb_finish_mmu(tlb, start, end);
+
+		/* Update rss for the mm_struct (need not be current->mm) */
+		if (mm->rss > freed)
+			mm->rss -= freed;
+		else
+			mm->rss = 0;
+
+		spin_unlock(&mm->page_table_lock);
+
+		address += block;
+		size -= block;
+	}
 }
 
 /*
