diff -urN linux-2.4.17-rmap-virgin/include/asm-i386/pgtable.h linux-2.4.17-rmap/include/asm-i386/pgtable.h
--- linux-2.4.17-rmap-virgin/include/asm-i386/pgtable.h	Thu Nov 22 11:46:19 2001
+++ linux-2.4.17-rmap/include/asm-i386/pgtable.h	Thu Dec  6 11:23:02 2001
@@ -267,7 +267,28 @@
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
+#ifdef CONFIG_HIGHMEM
 #define page_address(page) ((page)->virtual)
+#else /* !CONFIG_HIGHMEM */
+#define page_address(page) __va(((page) - mem_map) << PAGE_SHIFT)
+#endif /* !CONFIG_HIGHMEM */
+
+#define PageZone(page) \
+	(zone_table[((page)->flags >> (BITS_PER_LONG - 5)) & 0x3UL])
+
+#define SetPageZone(page,zone_id)					\
+	do {								\
+		(page)->flags &= ~(0x3UL << (BITS_PER_LONG - 5));	\
+		(page)->flags |= ((zone_id)&0x3UL)<<(BITS_PER_LONG-5);	\
+	} while(0)
+
+#define PageAge(page) (((page)->flags >> 21) & 0x3FUL)
+#define SetPageAge(page, age)						\
+	do {								\
+		(page)->flags &= ~(0x3FUL << 21);			\
+		(page)->flags |= ((age) & 0x3FUL) << 21;		\
+	} while(0)
+
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
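The PageZone() and PageAge() macros above pack what used to be the page->zone
pointer and the page->age counter (both removed from struct page further down)
into spare bits of page->flags: a 2-bit zone index stored at bit
BITS_PER_LONG - 5 and a 6-bit age stored at bits 21-26.  Below is a minimal
userspace sketch of that packing scheme, assuming a 32-bit long as on i386;
the helper names are made up for the example and it is not part of the patch.

/* Userspace model of the flag packing used by the patch (illustrative). */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG	32
#define ZONE_SHIFT	(BITS_PER_LONG - 5)	/* 2-bit zone index */
#define AGE_SHIFT	21			/* 6-bit page age   */

static unsigned long set_zone(unsigned long flags, unsigned long zone_id)
{
	flags &= ~(0x3UL << ZONE_SHIFT);
	flags |= (zone_id & 0x3UL) << ZONE_SHIFT;
	return flags;
}

static unsigned long set_age(unsigned long flags, unsigned long age)
{
	flags &= ~(0x3FUL << AGE_SHIFT);
	flags |= (age & 0x3FUL) << AGE_SHIFT;
	return flags;
}

int main(void)
{
	unsigned long flags = 0;

	flags = set_zone(flags, 2);	/* e.g. ZONE_HIGHMEM   */
	flags = set_age(flags, 5);	/* e.g. PAGE_AGE_START */

	assert(((flags >> ZONE_SHIFT) & 0x3UL) == 2);
	assert(((flags >> AGE_SHIFT) & 0x3FUL) == 5);
	printf("flags = %#lx\n", flags);
	return 0;
}
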
diff -urN linux-2.4.17-rmap-virgin/include/linux/mm.h linux-2.4.17-rmap/include/linux/mm.h
--- linux-2.4.17-rmap-virgin/include/linux/mm.h	Thu Dec  6 11:31:04 2001
+++ linux-2.4.17-rmap/include/linux/mm.h	Thu Dec  6 11:23:02 2001
@@ -162,14 +162,14 @@
 					   updated asynchronously */
 	struct list_head lru;		/* Pageout list, eg. active_list;
 					   protected by pagemap_lru_lock !! */
-	unsigned long age;		/* Page aging counter. */
 	struct pte_chain * pte_chain;	/* Reverse pte mapping pointer. */
 	wait_queue_head_t wait;		/* Page locked?  Stand in line... */
 	struct page **pprev_hash;	/* Complement to *next_hash. */
 	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
+#ifdef CONFIG_HIGHMEM
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
-	struct zone_struct *zone;	/* Memory zone we are in. */
+#endif /* CONFIG_HIGHMEM */
 } mem_map_t;
 
 /*
diff -urN linux-2.4.17-rmap-virgin/include/linux/mmzone.h linux-2.4.17-rmap/include/linux/mmzone.h
--- linux-2.4.17-rmap-virgin/include/linux/mmzone.h	Thu Dec  6 11:31:04 2001
+++ linux-2.4.17-rmap/include/linux/mmzone.h	Thu Dec  6 11:23:02 2001
@@ -65,6 +65,8 @@
 	unsigned long		size;
 } zone_t;
 
+extern zone_t *zone_table[];
+
 #define ZONE_DMA		0
 #define ZONE_NORMAL		1
 #define ZONE_HIGHMEM		2
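zone_table[] is what lets the PageZone() macro turn the 2-bit index packed
into page->flags back into a zone_t pointer; it is defined, exported and
filled (one slot per zone index) in mm/page_alloc.c further down.  A minimal
userspace sketch of the lookup follows, with made-up names and a 32-bit long
assumed; illustrative only, not part of the patch.

#include <stdio.h>

struct zone { const char *name; };

/* Stand-in for the kernel's zone_table[], one slot per zone index. */
static struct zone zones[3] = { { "DMA" }, { "Normal" }, { "HighMem" } };
static struct zone *zone_table[3];

/* What PageZone() does: recover the 2-bit index stored at bit 32 - 5. */
static struct zone *page_zone(unsigned long flags)
{
	return zone_table[(flags >> (32 - 5)) & 0x3UL];
}

int main(void)
{
	unsigned long flags = 0;
	int i;

	/* What free_area_init_core() does at boot. */
	for (i = 0; i < 3; i++)
		zone_table[i] = &zones[i];

	/* What SetPageZone(page, ZONE_HIGHMEM) does. */
	flags |= (2UL & 0x3UL) << (32 - 5);

	printf("%s\n", page_zone(flags)->name);	/* prints "HighMem" */
	return 0;
}
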
diff -urN linux-2.4.17-rmap-virgin/include/linux/swap.h linux-2.4.17-rmap/include/linux/swap.h
--- linux-2.4.17-rmap-virgin/include/linux/swap.h	Thu Dec  6 11:31:04 2001
+++ linux-2.4.17-rmap/include/linux/swap.h	Thu Dec  6 11:23:02 2001
@@ -3,6 +3,7 @@
 
 #include <linux/spinlock.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 
 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK	0x7fff
@@ -190,7 +191,7 @@
 #define PAGE_AGE_START 5
 #define PAGE_AGE_ADV 3
 #define PAGE_AGE_DECL 1
-#define PAGE_AGE_MAX 64
+#define PAGE_AGE_MAX 63
 
 /*
  * List add/del helper macros. These must be called
@@ -218,14 +219,14 @@
 	SetPageInactiveDirty(page); \
 	list_add(&(page)->lru, &inactive_dirty_list); \
 	nr_inactive_dirty_pages++; \
-	page->zone->inactive_dirty_pages++; \
+	PageZone(page)->inactive_dirty_pages++; \
 }
 
 #define add_page_to_inactive_clean_list(page) { \
 	DEBUG_LRU_PAGE(page); \
 	SetPageInactiveClean(page); \
-	list_add(&(page)->lru, &page->zone->inactive_clean_list); \
-	page->zone->inactive_clean_pages++; \
+	list_add(&(page)->lru, &PageZone(page)->inactive_clean_list); \
+	PageZone(page)->inactive_clean_pages++; \
 	nr_inactive_clean_pages++; \
 }
 
@@ -240,14 +241,14 @@
 	list_del(&(page)->lru); \
 	ClearPageInactiveDirty(page); \
 	nr_inactive_dirty_pages--; \
-	page->zone->inactive_dirty_pages--; \
+	PageZone(page)->inactive_dirty_pages--; \
 	DEBUG_LRU_PAGE(page); \
 }
 
 #define del_page_from_inactive_clean_list(page) { \
 	list_del(&(page)->lru); \
 	ClearPageInactiveClean(page); \
-	page->zone->inactive_clean_pages--; \
+	PageZone(page)->inactive_clean_pages--; \
 	nr_inactive_clean_pages--; \
 	DEBUG_LRU_PAGE(page); \
 }
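PAGE_AGE_MAX drops from 64 to 63 above because the age now lives in a 6-bit
field: 64 (0x40) does not fit, and masking it with 0x3F would wrap it to 0, so
a page saturating at the old maximum would suddenly look completely cold.  A
tiny standalone check of that wrap, using the same 6-bit assumption as the
sketch earlier (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned long flags = 0;

	/* Storing 64 in the 6-bit age field wraps the value to 0. */
	flags |= (64UL & 0x3FUL) << 21;
	assert(((flags >> 21) & 0x3FUL) == 0);

	/* 63 is the largest age that survives the round trip. */
	flags = (63UL & 0x3FUL) << 21;
	assert(((flags >> 21) & 0x3FUL) == 63);

	return 0;
}
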
diff -urN linux-2.4.17-rmap-virgin/mm/Makefile linux-2.4.17-rmap/mm/Makefile
--- linux-2.4.17-rmap-virgin/mm/Makefile	Thu Dec  6 11:31:04 2001
+++ linux-2.4.17-rmap/mm/Makefile	Wed Dec  5 23:15:42 2001
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-export-objs := shmem.o filemap.o
+export-objs := shmem.o filemap.o page_alloc.o
 
 obj-y	 := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
 	    vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
diff -urN linux-2.4.17-rmap-virgin/mm/page_alloc.c linux-2.4.17-rmap/mm/page_alloc.c
--- linux-2.4.17-rmap-virgin/mm/page_alloc.c	Thu Dec  6 11:31:05 2001
+++ linux-2.4.17-rmap/mm/page_alloc.c	Wed Dec  5 22:43:33 2001
@@ -16,7 +16,9 @@
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/mmzone.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/compiler.h>
 
 int nr_swap_pages;
@@ -32,6 +34,9 @@
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
 static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
 
+zone_t *zone_table[MAX_NR_ZONES];
+EXPORT_SYMBOL(zone_table);
+
 /*
  * Free_page() adds the page to the free lists. This is optimized for
  * fast normal cases (no error jumps taken normally).
@@ -55,7 +60,13 @@
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->zone_start_mapnr) || (((x)-mem_map) >= (zone)->zone_start_mapnr+(zone)->size))
+#define BAD_RANGE(zone, page)						\
+(									\
+		(zone) != PageZone(page)				\
+	||	(((page) - mem_map) < (zone)->zone_start_mapnr)		\
+	||	(((page) - mem_map) >= ((zone)->zone_start_mapnr	\
+					+ (zone)->size))		\
+)
 
 /*
  * Buddy system. Hairy. You really aren't expected to understand this
@@ -90,9 +101,9 @@
 	if (page->pte_chain)
 		BUG();
 	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
-	page->age = PAGE_AGE_START;
+	SetPageAge(page, PAGE_AGE_START);
 	
-	zone = page->zone;
+	zone = PageZone(page);
 
 	mask = (~0UL) << order;
 	base = zone->zone_mem_map;
@@ -796,6 +807,7 @@
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		unsigned long size = zones_size[i];
 		totalpages += size;
+		zone_table[i] = pgdat->node_zones + i;
 	}
 	realtotalpages = totalpages;
 	if (zholes_size)
@@ -894,9 +906,11 @@
 
 		for (i = 0; i < size; i++) {
 			struct page *page = mem_map + offset + i;
-			page->zone = zone;
+			SetPageZone(page, j);
+#ifdef CONFIG_HIGHMEM
 			if (j != ZONE_HIGHMEM)
 				page->virtual = __va(zone_start_paddr);
+#endif
 			zone_start_paddr += PAGE_SIZE;
 		}
 
diff -urN linux-2.4.17-rmap-virgin/mm/swap.c linux-2.4.17-rmap/mm/swap.c
--- linux-2.4.17-rmap-virgin/mm/swap.c	Thu Dec  6 11:31:05 2001
+++ linux-2.4.17-rmap/mm/swap.c	Wed Dec  5 22:21:51 2001
@@ -14,6 +14,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/mmzone.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/swapctl.h>
@@ -67,7 +68,7 @@
 	 */
 	ClearPageReferenced(page);
 	if (PageActive(page)) {
-		page->age = 0;
+		SetPageAge(page, 0);
 		del_page_from_active_list(page);
 		add_page_to_inactive_dirty_list(page);
 	}
@@ -96,7 +97,7 @@
 	}
 
 	/* Make sure the page gets a fair chance at staying active. */
-	page->age = max((int)page->age, PAGE_AGE_START);
+	SetPageAge(page, max((int)PageAge(page), PAGE_AGE_START));
 }
 
 void FASTCALL(activate_page(struct page *));
diff -urN linux-2.4.17-rmap-virgin/mm/vmscan.c linux-2.4.17-rmap/mm/vmscan.c
--- linux-2.4.17-rmap-virgin/mm/vmscan.c	Thu Dec  6 11:31:05 2001
+++ linux-2.4.17-rmap/mm/vmscan.c	Wed Dec  5 22:24:38 2001
@@ -36,12 +36,12 @@
 
 static inline void age_page_up(struct page *page)
 {
-	page->age = min((int) (page->age + PAGE_AGE_ADV), PAGE_AGE_MAX); 
+	SetPageAge(page, min((int)(PageAge(page)+PAGE_AGE_ADV), PAGE_AGE_MAX));
 }
 
 static inline void age_page_down(struct page *page)
 {
-	page->age -= min(PAGE_AGE_DECL, (int)page->age);
+	SetPageAge(page, PageAge(page) - min(PAGE_AGE_DECL,(int)PageAge(page)));
 }
 
 /*
@@ -133,7 +133,7 @@
 		if (unlikely(!PageInactiveClean(page))) {
 			printk("VM: reclaim_page, wrong page on list.\n");
 			list_del(page_lru);
-			page->zone->inactive_clean_pages--;
+			PageZone(page)->inactive_clean_pages--;
 			continue;
 		}
 
@@ -182,7 +182,7 @@
 	if (entry.val)
 		swap_free(entry);
 	UnlockPage(page);
-	page->age = PAGE_AGE_START;
+	SetPageAge(page, PAGE_AGE_START);
 	if (page_count(page) != 1)
 		printk("VM: reclaim_page, found page with count %d!\n",
 				page_count(page));
@@ -253,7 +253,7 @@
 			printk("VM: page_launder, wrong page on list.\n");
 			list_del(entry);
 			nr_inactive_dirty_pages--;
-			page->zone->inactive_dirty_pages--;
+			PageZone(page)->inactive_dirty_pages--;
 			continue;
 		}
 
@@ -261,11 +261,11 @@
 		 * The page is in active use or really unfreeable. Move to
 		 * the active list and adjust the page age if needed.
 		 */
-		if ((PageReferenced(page) || page->age) &&
+		if ((PageReferenced(page) || PageAge(page)) &&
 				page_mapping_inuse(page)) {
 			del_page_from_inactive_dirty_list(page);
 			add_page_to_active_list(page);
-			page->age = max((int)page->age, PAGE_AGE_START);
+			SetPageAge(page,max((int)PageAge(page),PAGE_AGE_START));
 			continue;
 		}
 
@@ -290,7 +290,7 @@
 		 * on cleaning it but only move clean pages out of the way
 		 * so we won't have to scan those again.
 		 */
-		if (zone_free_plenty(page->zone) || page_count(page) == 0) {
+		if (zone_free_plenty(PageZone(page)) || page_count(page) == 0) {
 			continue;
 		}
 
@@ -501,8 +501,8 @@
 		 * Don't deactivate pages from zones which have
 		 * plenty inactive pages.
 		 */
-		if (unlikely(zone_inactive_plenty(page->zone) &&
-				zone_free_plenty(page->zone))) {
+		if (unlikely(zone_inactive_plenty(PageZone(page)) &&
+				zone_free_plenty(PageZone(page)))) {
 			goto skip_page;
 		}
 
@@ -511,7 +511,7 @@
 		 * is in is still in use, we keep the page. Otherwise
 		 * we move it to the inactive_dirty list.
 		 */
-		if (page->age && page_mapping_inuse(page)) {
+		if (PageAge(page) && page_mapping_inuse(page)) {
 skip_page:
 			list_del(page_lru);
 			list_add(page_lru, &active_list);
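
The swap.c and vmscan.c hunks above are mechanical conversions of page->age
and page->zone accesses to the new PageAge()/SetPageAge() and PageZone()
macros; apart from the PAGE_AGE_MAX change already noted, the aging policy is
unchanged.  Under the same userspace model as the earlier sketches (min_int()
is a made-up helper, illustrative only), age_page_up() and age_page_down()
still saturate at PAGE_AGE_MAX and clamp at zero:

#include <assert.h>

#define PAGE_AGE_ADV	3
#define PAGE_AGE_DECL	1
#define PAGE_AGE_MAX	63

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int age = 62;

	/* age_page_up(): advance, but saturate at PAGE_AGE_MAX. */
	age = min_int(age + PAGE_AGE_ADV, PAGE_AGE_MAX);
	assert(age == PAGE_AGE_MAX);

	/* age_page_down(): decline, but never go below zero. */
	age = 0;
	age -= min_int(PAGE_AGE_DECL, age);
	assert(age == 0);

	return 0;
}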