Patch from Nick Piggin <piggin@cyberone.com.au>

* cosmetic fixes from debug patch

* no longer hold ad->as_io_context over a write batch, just so we can't
  get confused (not that I think it ever did). search for put_as_io_context.

* get rid of queue_empty and call queue_notready in as_antic_work.
  queue_empty does not have a well-defined meaning, especially from
  q->request_fn. search for as_queue_empty.

* tighten up a few conditions in as_can_anticipate

* the big fix is that when a request has finished, we set its as_io_context
  status to AS_REQ_FINISHED so that if we end up anticipating on it we can
  go straight to ANTIC_WAIT_NEXT.  Unfortunately as_io_context is not
  per-request, and is cleared if the same process submits another IO.
  Add a flag to keep track of that instead of AS_REQ_FINISHED.



 drivers/block/as-iosched.c |   88 +++++++++++++++++++++++----------------------
 1 files changed, 45 insertions(+), 43 deletions(-)

diff -puN drivers/block/as-iosched.c~as-jumbo-fix drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-jumbo-fix	2003-03-15 21:36:43.000000000 -0800
+++ 25-akpm/drivers/block/as-iosched.c	2003-03-15 22:57:06.000000000 -0800
@@ -114,7 +114,6 @@ struct as_io_context {
 enum as_io_states {
 	AS_TASK_RUNNING=0,	/* Process has not exitted */
 	AS_TASK_IORUNNING,	/* Process has completed some IO */
-	AS_REQ_FINISHED,	/* Set in ad->as_io_context upon completion */
 };
 
 struct as_data {
@@ -144,6 +143,7 @@ struct as_data {
 	struct timer_list antic_timer;	/* anticipatory scheduling timer */
 	struct work_struct antic_work;	/* anticipatory scheduling work */
 	struct as_io_context *as_io_context;/* Identify the expected process */
+	int aic_finished; /* IO associated with as_io_context finished */
 
 	/*
 	 * settings that change how the i/o scheduler behaves
@@ -527,13 +527,13 @@ static void as_complete_arq(struct as_da
 		return;
 
 	if (rq_data_dir(arq->request) == READ) {
-		set_bit(AS_REQ_FINISHED, &arq->as_io_context->state);
 		set_bit(AS_TASK_IORUNNING, &arq->as_io_context->state);
 		arq->as_io_context->last_end_request = jiffies;
 	}
 
 	if (ad->as_io_context == arq->as_io_context) {
 		ad->antic_start = jiffies;
+		ad->aic_finished = 1;
 		if (ad->antic_status == ANTIC_WAIT_REQ) {
 			/*
 			 * We were waiting on this request, now anticipate
@@ -557,7 +557,6 @@ static void as_add_request(struct as_dat
 	arq->as_io_context = get_as_io_context();
 	if (arq->as_io_context) {
 		atomic_inc(&arq->as_io_context->nr_queued);
-		clear_bit(AS_REQ_FINISHED, &arq->as_io_context->state);
 
 		if (data_dir == READ)
 			as_update_iohist(arq->as_io_context);
@@ -796,7 +795,9 @@ static void as_move_to_dispatch(struct a
 	if (data_dir == READ) {
 		/* In case we have to anticipate after this */
 		copy_as_io_context(&ad->as_io_context, &arq->as_io_context);
-	}
+		ad->aic_finished = 0;
+	} else
+		put_as_io_context(&ad->as_io_context);
 
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
@@ -869,7 +870,7 @@ static inline int as_batch_expired(struc
  * anticipatory scheduling functions follow
  */
 
-static int as_queue_empty(request_queue_t *q);
+static int as_queue_notready(request_queue_t *q);
 
 /*
  * as_antic_work is scheduled by as_antic_timeout. It
@@ -881,7 +882,7 @@ static void as_antic_work(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (!as_queue_empty(q))
+	if (!as_queue_notready(q))
 		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -892,10 +893,9 @@ static void as_antic_waitreq(struct as_d
 	if (ad->antic_status == ANTIC_OFF) {
 		ant_stats.anticipate_starts++;
 
-		if (!ad->as_io_context || test_bit(AS_REQ_FINISHED,
-						&ad->as_io_context->state))
+		if (!ad->as_io_context || ad->aic_finished)
 			as_antic_waitnext(ad);
-		else
+		else 
 			ad->antic_status = ANTIC_WAIT_REQ;
 	}
 }
@@ -992,6 +992,14 @@ static int as_can_break_anticipation(str
 {
 	struct as_io_context *aic;
 	
+	if (ad->antic_status == ANTIC_WAIT_NEXT && as_antic_expired(ad)) {
+		/*
+		 * In this situation status should really be FINISHED,
+		 * however the timer hasn't had the chance to run yet.
+		 */
+		return 1;
+	}
+
 	if (rq_data_dir(arq->request) == READ && as_close_req(ad, arq)) {
 		ant_stats.close_requests++;
 		return 1;
@@ -1088,20 +1096,20 @@ static void as_update_arq(struct as_data
  */
 static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
 {
-	if (ad->antic_status == ANTIC_FINISHED)
+	BUG_ON(ad->antic_status == ANTIC_WAIT_REQ ||
+		ad->antic_status == ANTIC_WAIT_NEXT);
+
+	if (!ad->as_io_context)
 		/*
-		 * Don't restart if we have just finished. Run the next request
+		 * Last request submitted was a write
 		 */
 		return 0;
 
-	if (ad->antic_status == ANTIC_WAIT_NEXT && as_antic_expired(ad)) {
+	if (ad->antic_status == ANTIC_FINISHED)
 		/*
-		 * In this situation status should really be FINISHED,
-		 * however the timer hasn't had the chance to run yet.
+		 * Don't restart if we have just finished. Run the next request
 		 */
-		as_antic_stop(ad);
 		return 0;
-	}
 
 	if (arq && as_can_break_anticipation(ad, arq))
 		/*
@@ -1111,11 +1119,8 @@ static int as_can_anticipate(struct as_d
 		return 0;
 
 	/*
-	 * OK from here, we haven't finished, haven't timed out, and don't
-	 * have a decent request!
-	 * Status can be: ANTIC_OFF so start waiting,
-	 * ANTIC_WAIT_REQ so continue to wait for request to complete,
-	 * ANTIC_WAIT_NEXT so continue to wait for timeout or suitable request.
+	 * OK from here, we haven't finished, and don't have a decent request!
+	 * Status is ANTIC_OFF so start waiting.
 	 */
 
 	return 1;
@@ -1321,6 +1326,7 @@ static struct request *as_next_request(r
 	 */
 	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
 		rq = list_entry_rq(ad->dispatch->next);
+
 	return rq;
 }
 
@@ -1363,18 +1369,6 @@ as_insert_request(request_queue_t *q, st
 	as_add_request(ad, arq);
 }
 
-static int as_queue_empty(request_queue_t *q)
-{
-	struct as_data *ad = q->elevator.elevator_data;
-
-	if (!list_empty(&ad->fifo_list[WRITE])
-		|| !list_empty(&ad->fifo_list[READ])
-		|| !list_empty(ad->dispatch) )
-			return 0;
-
-	return 1;
-}
-
 /*
  * as_queue_notready tells us weather or not as_next_request
  * will return us a request or NULL. With the previous work conserving
@@ -1384,18 +1378,24 @@ static int as_queue_empty(request_queue_
  */
 static int as_queue_notready(request_queue_t *q)
 {
+	int ret = 0;
 	struct as_data *ad = q->elevator.elevator_data;
 
 	if (!list_empty(ad->dispatch))
-		return 0;
+		goto out;
 	
-	if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT)
-		return 1;
+	if (ad->antic_status == ANTIC_WAIT_REQ || ad->antic_status == ANTIC_WAIT_NEXT) {
+		ret = 1;
+		goto out;
+	}
 				
-	if (!as_dispatch_request(ad))
-		return 1;
+	if (!as_dispatch_request(ad)) {
+		ret = 1;
+		goto out;
+	}
 
-	return 0;
+out:
+	return ret;
 }
 
 static struct request *
@@ -1403,11 +1403,12 @@ as_former_request(request_queue_t *q, st
 {
 	struct as_rq *arq = RQ_DATA(rq);
 	struct rb_node *rbprev = rb_prev(&arq->rb_node);
+	struct request *ret = NULL;
 
 	if (rbprev)
-		return rb_entry_arq(rbprev)->request;
+		ret = rb_entry_arq(rbprev)->request;
 
-	return NULL;
+	return ret;
 }
 
 static struct request *
@@ -1415,11 +1416,12 @@ as_latter_request(request_queue_t *q, st
 {
 	struct as_rq *arq = RQ_DATA(rq);
 	struct rb_node *rbnext = rb_next(&arq->rb_node);
+	struct request *ret = NULL;
 
 	if (rbnext)
-		return rb_entry_arq(rbnext)->request;
+		ret = rb_entry_arq(rbnext)->request;
 
-	return NULL;
+	return ret;
 }
 
 static void as_exit(request_queue_t *q, elevator_t *e)

_