Patch from Nick Piggin <piggin@cyberone.com.au>

Rework when the anticipatory IO scheduler gives up anticipating:

- In as_can_break_anticipation(), test for a close read request first, so
  a nearby read is dispatched before the timeout and owner checks run.

- When checking for an expired anticipation window, test ad->aic_finished
  instead of ANTIC_WAIT_NEXT status, and count these breaks in
  ant_stats.timeouts.

- New check: if the process being anticipated already has a request out on
  the dispatch queue (aic->nr_dispatched), stop anticipating.  A new
  ant_stats.dispatched_request counter tracks how often this fires.

- In as_close_req(), use a zero delay when anticipation is off or
  ad->aic_finished is not yet set (previously: off or ANTIC_WAIT_REQ).

- Clear ad->aic_finished in as_move_to_dispatch() for both reads and
  writes, not just reads.

- Cleanups: reuse the cached aic pointer in as_remove_dispatched_request()
  and tidy whitespace in as_add_request().

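For reference, below is a small stand-alone user-space model (not the kernel
code) of the check order in as_can_break_anticipation() after this patch.
The struct fields, counter names and helper results mirror the hunks below;
the types and HZ value are simplified stand-ins, and one check whose body
falls outside the visible hunks is only marked, not modelled.

	#include <stddef.h>
	#include <stdio.h>

	#define MODEL_HZ 1000		/* stand-in for the kernel's HZ */

	enum rw { READ, WRITE };

	struct aic {			/* models struct as_io_context */
		int task_running;	/* models test_bit(AS_TASK_RUNNING, ...) */
		int nr_dispatched;	/* models atomic_read(&aic->nr_dispatched) */
		int mean_thinktime;	/* jiffies */
	};

	struct snapshot {		/* inputs the real function derives itself */
		enum rw dir;		/* rq_data_dir(arq->request) */
		int close_req;		/* result of as_close_req(ad, arq) */
		int aic_finished;	/* ad->aic_finished */
		int antic_expired;	/* result of as_antic_expired(ad) */
		struct aic *ad_aic;	/* ad->as_io_context */
		struct aic *arq_aic;	/* arq->as_io_context */
	};

	/* Returns the name of the ant_stats counter that would be bumped,
	 * or NULL to keep anticipating. */
	static const char *can_break_anticipation(const struct snapshot *s)
	{
		/* 1. A close read request now breaks anticipation first. */
		if (s->dir == READ && s->close_req)
			return "close_requests";

		/* 2. Window expired; status should really be FINISHED but
		 *    the timer has not had the chance to run yet. */
		if (s->aic_finished && s->antic_expired)
			return "timeouts";

		/* 3. Request from the process we are anticipating. */
		if (s->ad_aic == s->arq_aic)
			return s->dir == WRITE ? "matching_ids+broken_by_write"
					       : "matching_ids";

		/* 4. The anticipated task has exited. */
		if (s->ad_aic && !s->ad_aic->task_running)
			return "exitted_tasks";

		/* (one further check, outside the visible hunks, sits here) */

		/* 5. New in this patch: the anticipated process already has
		 *    a request on the dispatch queue, so there is no point
		 *    waiting for it to submit another. */
		if (s->ad_aic && s->ad_aic->nr_dispatched > 0)
			return "dispatched_request";

		/* 6. The anticipated process thinks too long on average. */
		if (s->ad_aic && s->ad_aic->mean_thinktime >
				(MODEL_HZ / 200 > 1 ? MODEL_HZ / 200 : 1))
			return "big_thinktime";

		return NULL;
	}

	int main(void)
	{
		struct aic owner = { .task_running = 1, .nr_dispatched = 1 };
		struct aic other = { .task_running = 1 };
		struct snapshot s = { .dir = WRITE, .ad_aic = &owner,
				      .arq_aic = &other };

		/* Check 5 fires: the anticipated process has a request in
		 * flight, so even this unrelated write breaks anticipation. */
		printf("broken by: %s\n", can_break_anticipation(&s));
		return 0;
	}

Note the ordering consequence: because the close-request test moved to the
top, a nearby read wins even when the window has already expired.
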
 drivers/block/as-iosched.c |   32 +++++++++++++++++++-------------
 1 files changed, 19 insertions(+), 13 deletions(-)

diff -puN drivers/block/as-iosched.c~as-np-reads-1 drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-np-reads-1	2003-03-18 01:43:48.000000000 -0800
+++ 25-akpm/drivers/block/as-iosched.c	2003-03-18 01:43:51.000000000 -0800
@@ -38,6 +38,7 @@ struct ant_stats {
 	int broken_by_write;
 	int exitted_tasks;
 	int queued_request;
+	int dispatched_request;
 	int big_thinktime;
 
 	int ant_delay_hist[100];	/* milliseconds */
@@ -686,7 +687,7 @@ static int as_close_req(struct as_data *
 	sector_t next = arq->request->sector;
 	sector_t delta;	/* acceptable close offset (in sectors) */
 
-	if (ad->antic_status == ANTIC_OFF || ad->antic_status == ANTIC_WAIT_REQ)
+	if (ad->antic_status == ANTIC_OFF || !ad->aic_finished)
 		delay = 0;
 	else
 		delay = ((jiffies - ad->antic_start) * 1000) / HZ;
@@ -718,27 +719,27 @@ static int as_can_break_anticipation(str
 {
 	struct as_io_context *aic;
 	
-	if (ad->antic_status == ANTIC_WAIT_NEXT && as_antic_expired(ad)) {
+	if (rq_data_dir(arq->request) == READ && as_close_req(ad, arq)) {
+		ant_stats.close_requests++;
+		return 1;
+	}
+	
+	if (ad->aic_finished && as_antic_expired(ad)) {
 		/*
 		 * In this situation status should really be FINISHED,
 		 * however the timer hasn't had the chance to run yet.
 		 */
+		ant_stats.timeouts++;
 		return 1;
 	}
 
-	if (rq_data_dir(arq->request) == READ && as_close_req(ad, arq)) {
-		ant_stats.close_requests++;
-		return 1;
-	}
-	
-	if (ad->as_io_context == arq->as_io_context) {
+	aic = ad->as_io_context;
+	if (aic == arq->as_io_context) {
 		ant_stats.matching_ids++;
 		if (rq_data_dir(arq->request) == WRITE)
 			ant_stats.broken_by_write++;
 		return 1;
 	}
-
-	aic = ad->as_io_context;
 	if (aic && !test_bit(AS_TASK_RUNNING, &aic->state)) {
 		ant_stats.exitted_tasks++;
 		return 1;
@@ -749,6 +750,11 @@ static int as_can_break_anticipation(str
 		return 1;
 	}
 
+	if (aic && atomic_read(&aic->nr_dispatched) > 0) {
+		ant_stats.dispatched_request++;
+		return 1;
+	}
+
 	if (aic && aic->mean_thinktime > max(HZ/200, 1)) {
 		ant_stats.big_thinktime++;
 		return 1;
@@ -975,7 +981,7 @@ static void as_remove_dispatched_request
 		aic = arq->as_io_context;
 		if (aic) {
 			WARN_ON(!atomic_read(&aic->nr_dispatched));
-			atomic_dec(&arq->as_io_context->nr_dispatched);
+			atomic_dec(&aic->nr_dispatched);
 		}
 	}
 }
@@ -1064,9 +1070,9 @@ static void as_move_to_dispatch(struct a
 	if (data_dir == READ) {
 		/* In case we have to anticipate after this */
 		copy_as_io_context(&ad->as_io_context, &arq->as_io_context);
-		ad->aic_finished = 0;
 	} else
 		put_as_io_context(&ad->as_io_context);
+	ad->aic_finished = 0;
 
 	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
@@ -1212,9 +1218,9 @@ static void as_add_request(struct as_dat
 	const int data_dir = rq_data_dir(arq->request);
 
 	arq->as_io_context = get_as_io_context();
+
 	if (arq->as_io_context) {
 		atomic_inc(&arq->as_io_context->nr_queued);
-
 		if (data_dir == READ)
 			as_update_iohist(arq->as_io_context);
 	}

_