diff -puN include/linux/sched.h~as-handle-exitted-tasks include/linux/sched.h
--- 25/include/linux/sched.h~as-handle-exitted-tasks	Mon Feb 24 15:10:37 2003
+++ 25-akpm/include/linux/sched.h	Mon Feb 24 15:10:37 2003
@@ -315,6 +315,8 @@ struct k_itimer {
 };
 
 
+struct as_io_context;			/* Anticipatory scheduler */
+void exit_as_io_context(struct as_io_context *);
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -438,6 +440,8 @@ struct task_struct {
 	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
 
+	struct as_io_context *as_io_context;
+
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
 };
diff -puN kernel/exit.c~as-handle-exitted-tasks kernel/exit.c
--- 25/kernel/exit.c~as-handle-exitted-tasks	Mon Feb 24 15:10:37 2003
+++ 25-akpm/kernel/exit.c	Mon Feb 24 15:10:37 2003
@@ -694,6 +694,8 @@ NORET_TYPE void do_exit(long code)
 		panic("Attempted to kill the idle task!");
 	if (unlikely(tsk->pid == 1))
 		panic("Attempted to kill init!");
+	if (tsk->as_io_context)
+		exit_as_io_context(tsk->as_io_context);
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);
 
diff -puN kernel/fork.c~as-handle-exitted-tasks kernel/fork.c
--- 25/kernel/fork.c~as-handle-exitted-tasks	Mon Feb 24 15:10:37 2003
+++ 25-akpm/kernel/fork.c	Mon Feb 24 15:10:37 2003
@@ -850,6 +850,7 @@ static struct task_struct *copy_process(
 	p->lock_depth = -1;		/* -1 = no lock */
 	p->start_time = get_jiffies_64();
 	p->security = NULL;
+	p->as_io_context = NULL;
 
 	retval = -ENOMEM;
 	if (security_task_alloc(p))
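
The core-kernel side is deliberately small: copy_process() starts every child
with a NULL as_io_context, the context is created lazily when the task first
submits I/O, and do_exit() tells the scheduler the task is gone.  A rough
lifecycle sketch, in C comment form (assuming the as-iosched.c changes below):

/*
 * as_io_context lifecycle (sketch):
 *
 *   copy_process()          p->as_io_context = NULL
 *   first I/O submission    get_as_io_context() kmallocs the context and
 *                           takes one reference for the task plus one for
 *                           the queued request
 *   each queued arq         holds its own reference via get/copy, dropped
 *                           when the request is merged away, dispatched or
 *                           the queue is torn down
 *   do_exit()               exit_as_io_context() clears AS_IO_RUNNING and
 *                           drops the task's reference; the object is freed
 *                           on the final put
 */
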
--- linux-2.5/drivers/block/as-iosched.c.orig	2003-02-25 20:02:37.000000000 +1100
+++ linux-2.5/drivers/block/as-iosched.c	2003-02-25 21:03:18.000000000 +1100
@@ -32,10 +32,11 @@ struct ant_stats {
 	int expired_fifo_reads;
 	int expired_fifo_writes;
 	int close_requests;
 	int matching_ids;
 	int broken_by_write;
+	int exitted_tasks;
 
 	int ant_delay_hist[100];	/* milliseconds */
 
 	/*
 	 * This is a logarithmic (base 2) histogram
@@ -79,10 +80,30 @@ static unsigned long antic_expire = HZ /
 #define ANTIC_OFF	0	/* Not anticipating (normal operation)	*/
 #define ANTIC_WAIT	1	/* Currently anticipating a request	*/
 #define ANTIC_FINISHED	2	/* Anticipating but have found a candidate
 				   or timed out	*/
 
+
+
+/*
+ * This is the per-process anticipatory I/O scheduler state.  It is refcounted
+ * and kmalloc'ed.
+ *
+ * At present it is merely used to determine whether the task is still running.
+ */
+
+struct as_io_context {
+	atomic_t refcount;
+	pid_t pid;
+	unsigned long state;
+};
+
+/* Bits in as_io_context.state */
+enum as_io_states {
+	AS_IO_RUNNING = 0,	/* Process has not exited */
+};
+
 struct as_data {
 	/*
 	 * run time data
 	 */
 
@@ -105,11 +126,11 @@ struct as_data {
 
 	int antic_status;
 	unsigned long antic_start;	/* jiffies: when it started */
 	struct timer_list antic_timer;	/* anticipatory scheduling timer */
 	struct work_struct antic_work;	/* anticipatory scheduling work */
-	unsigned long current_id;	/* Identify the expected process */
+	struct as_io_context *as_io_context;/* Identify the expected process */
 
 	/*
 	 * settings that change how the i/o scheduler behaves
 	 */
 	unsigned long fifo_expire[2];
@@ -117,22 +138,22 @@ struct as_data {
 	unsigned long front_merges;
 	unsigned long antic_expire;
 };
 
 /*
- * pre-request data.
+ * per-request data.
  */
 struct as_rq {
 	/*
 	 * rbtree index, key is the starting offset
 	 */
 	struct rb_node rb_node;
 	sector_t rb_key;
 
 	struct request *request;
 
-	unsigned long request_id;
+	struct as_io_context *as_io_context;	/* The submitting task */
 
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
 	struct list_head hash;
@@ -147,13 +168,75 @@ struct as_rq {
 
 #define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)
 
 static kmem_cache_t *arq_pool;
 
-static inline unsigned long request_id(void)
+/*
+ * IO Context helper functions
+ */
+/* Debug: counts the number of live as_io_contexts */
+static atomic_t nr_as_io_requests = ATOMIC_INIT(0);
+
+static void put_as_io_context(struct as_io_context **paic)
+{
+	struct as_io_context *aic = *paic;
+
+	if (aic == NULL)
+		return;
+
+	BUG_ON(atomic_read(&aic->refcount) == 0);
+	*paic = NULL;	/* the caller's reference is gone either way */
+	if (atomic_dec_and_test(&aic->refcount)) {
+		atomic_dec(&nr_as_io_requests);
+		printk("kfreeing %p\n", aic);
+		kfree(aic);
+	}
+}
+
+/* Called by the exiting task */
+void exit_as_io_context(struct as_io_context *aic)
+{
+	clear_bit(AS_IO_RUNNING, &aic->state);
+	put_as_io_context(&aic);
+}
+
+/*
+ * Called from process context, by the task which is submitting I/O.  If the
+ * task has no IO context then create and initialise one (returns NULL if the
+ * allocation fails).  Either way, a reference is taken for the caller.
+ */
+static struct as_io_context *get_as_io_context(void)
 {
-	return (unsigned long)current->pid;
+	struct task_struct *tsk = current;
+	struct as_io_context *ret = tsk->as_io_context;
+
+	if (ret == NULL) {
+		ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+		if (ret == NULL)
+			return NULL;	/* leave tsk->as_io_context unset */
+		printk("kmalloc returned %p\n", ret);
+		atomic_inc(&nr_as_io_requests);
+		atomic_set(&ret->refcount, 1);
+		ret->pid = tsk->pid;
+		ret->state = 1 << AS_IO_RUNNING;
+		tsk->as_io_context = ret;
+	}
+	atomic_inc(&ret->refcount);
+	return ret;
+}
+
+static void
+copy_as_io_context(struct as_io_context **pdst, struct as_io_context **psrc)
+{
+	struct as_io_context *src = *psrc;
+
+	if (src) {
+		BUG_ON(atomic_read(&src->refcount) == 0);
+		atomic_inc(&src->refcount);
+		put_as_io_context(pdst);
+		*pdst = src;
+	}
 }
 
 /*
  * the back merge hash support functions
  */
@@ -328,11 +411,12 @@ static void as_update_arq(struct as_data
  */
 static void as_add_request(struct as_data *ad, struct as_rq *arq)
 {
 	const int data_dir = rq_data_dir(arq->request);
 
-	arq->request_id = request_id();
+	put_as_io_context(&arq->as_io_context);
+	arq->as_io_context = get_as_io_context();
 
 	as_add_arq_rb(ad, arq);
 
 	as_update_arq(ad, arq); /* keep state machine up to date */
 
@@ -501,18 +585,20 @@ as_merged_requests(request_queue_t *q, s
 	 */
 	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
 		if (time_before(anext->expires, arq->expires)) {
 			list_move(&arq->fifo, &anext->fifo);
 			arq->expires = anext->expires;
-			arq->request_id = anext->request_id;
+			copy_as_io_context(&arq->as_io_context,
+					&anext->as_io_context);
 		}
 	}
 
 	/*
 	 * kill knowledge of next, this one is a goner
 	 */
 	as_remove_request(q, next);
+	put_as_io_context(&anext->as_io_context);
 }
 
 /*
  * move an entry to dispatch queue
  */
@@ -525,17 +611,18 @@ static void as_move_to_dispatch(struct a
 	ad->last_sector[data_dir] = arq->request->sector
 					+ arq->request->nr_sectors;
 
 	if (data_dir == READ)
 		/* In case we have to anticipate after this */
-		ad->current_id = arq->request_id;
+		copy_as_io_context(&ad->as_io_context, &arq->as_io_context);
 	
 	/*
 	 * take it off the sort and fifo list, move
 	 * to dispatch queue
 	 */
 	as_remove_request(ad->q, arq->request);
+	put_as_io_context(&arq->as_io_context);
 	list_add_tail(&arq->request->queuelist, ad->dispatch);
 }
 
 #define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
 
@@ -592,11 +679,10 @@ static inline int as_batch_expired(struc
 
 /*
  * anticipatory scheduling functions follow
  */
 
-
 static int as_queue_empty(request_queue_t *q);
 
 /*
  * as_anticipate_work is scheduled by as_anticipate_timeout. It
  * stops anticipation, ie. resumes dispatching requests to a device.
@@ -694,23 +780,35 @@ as_close_req(struct as_data *ad, struct 
  *
  * It also returns true if the process against which we are anticipating
  * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
  * dispatch it ASAP, because we know that application will not be submitting
  * any new reads.
+ *
+ * If the task which submitted the request has exited, break anticipation.
  */
 static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 {
+	struct as_io_context *aic;
+
 	if (rq_data_dir(arq->request) == READ && as_close_req(ad, arq)) {
 		ant_stats.close_requests++;
 		return 1;
 	}
-	if (ad->current_id == arq->request_id) {
+
+	if (ad->as_io_context == arq->as_io_context) {
 		ant_stats.matching_ids++;
 		if (rq_data_dir(arq->request) == WRITE)
 			ant_stats.broken_by_write++;
 		return 1;
 	}
+
+	aic = ad->as_io_context;
+	if (aic && !test_bit(AS_IO_RUNNING, &aic->state)) {
+		ant_stats.exitted_tasks++;
+		return 1;
+	}
+
 	return 0;
 }
 
 /*
  * as_update_arq must be called whenever a request (arq) is added to
@@ -837,20 +935,20 @@ as_choose_req(struct as_data *ad, struct
 	 * from the same process!
 	 */
 	if (s1 >= last)
 		d1 = s1 - last;
 	else if (data_dir == READ
-			&& ad->current_id == arq1->request_id
+			&& ad->as_io_context == arq1->as_io_context
 			&& s1+maxback >= last)
 				d1 = (last - s1)*2;
 	else
 		goto elevator_wrap;
 
 	if (s2 >= last)
 		d2 = s2 - last;
 	else if (data_dir == READ
-			&& ad->current_id == arq2->request_id
+			&& ad->as_io_context == arq2->as_io_context
 			&& s2+maxback >= last)
 				d2 = (last - s2)*2;
 	else
 		goto elevator_wrap;
 
@@ -1122,15 +1220,17 @@ static void as_exit(request_queue_t *q, 
 			rq = list_entry_rq(entry);
 
 			if ((arq = RQ_DATA(rq)) == NULL)
 				continue;
 
+			put_as_io_context(&arq->as_io_context);
 			rq->elevator_private = NULL;
 			kmem_cache_free(arq_pool, arq);
 		}
 	}
 
+	put_as_io_context(&ad->as_io_context);
 	kfree(ad->hash);
 	kfree(ad);
 }
 
 /*
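
For reference, the get/copy/put refcounting idiom above can be exercised
outside the kernel.  The following is a minimal standalone C11 sketch, not
kernel code: malloc/free stand in for kmalloc/kfree, <stdatomic.h> for
atomic_t, and the names only mirror those in the patch.

/* ctx_refcount_sketch.c -- illustrates the as_io_context get/copy/put idiom */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_ctx {
	atomic_int refcount;
	int pid;
};

/* Drop one reference and clear the caller's pointer; free on the last put */
static void put_ctx(struct io_ctx **pctx)
{
	struct io_ctx *ctx = *pctx;

	if (ctx == NULL)
		return;
	*pctx = NULL;
	if (atomic_fetch_sub(&ctx->refcount, 1) == 1) {
		printf("freeing context of pid %d\n", ctx->pid);
		free(ctx);
	}
}

/* Allocate a context holding one reference for its "task" */
static struct io_ctx *get_ctx(int pid)
{
	struct io_ctx *ctx = malloc(sizeof(*ctx));

	if (ctx) {
		atomic_init(&ctx->refcount, 1);
		ctx->pid = pid;
	}
	return ctx;
}

/* Point *pdst at *psrc: take a reference on src, release whatever dst held */
static void copy_ctx(struct io_ctx **pdst, struct io_ctx **psrc)
{
	struct io_ctx *src = *psrc;

	if (src) {
		atomic_fetch_add(&src->refcount, 1);
		put_ctx(pdst);
		*pdst = src;
	}
}

int main(void)
{
	struct io_ctx *task_ref = get_ctx(42);	/* the task's own reference */
	struct io_ctx *arq_ref = NULL;		/* a queued request's reference */
	struct io_ctx *ad_ref = NULL;		/* the scheduler's "expected task" */

	if (task_ref == NULL)
		return 1;

	copy_ctx(&arq_ref, &task_ref);	/* request remembers its submitter */
	copy_ctx(&ad_ref, &arq_ref);	/* scheduler anticipates this task */

	put_ctx(&arq_ref);	/* request dispatched */
	put_ctx(&task_ref);	/* the task exits */
	put_ctx(&ad_ref);	/* scheduler forgets the task: last put frees */
	return 0;
}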