From: Nick Piggin <piggin@cyberone.com.au>

This is hopefully the last big change the anticipatory scheduler (AS) needs.
It reintroduces write_batch_expire as a time value and auto-tunes the number
of requests allowed in a write batch so that the batch runs for approximately
this target time.




 25-akpm/drivers/block/as-iosched.c |   73 ++++++++++++++++++++++++++++++-------
 1 files changed, 60 insertions(+), 13 deletions(-)

diff -puN drivers/block/as-iosched.c~as-autotune-write-batches drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-autotune-write-batches	Thu Jun  5 14:07:49 2003
+++ 25-akpm/drivers/block/as-iosched.c	Thu Jun  5 14:07:49 2003
@@ -47,10 +47,12 @@ static unsigned long write_expire = HZ /
 static unsigned long read_batch_expire = HZ / 5;
 
 /*
- * write_batch_expire describes how many write request we allow before looking
- * to see whether it is time to switch over to reads.
+ * write_batch_expire describes how long we want a stream of writes to run for.
+ * This is not a hard limit, but a target for the batch auto-tuning logic:
+ * because a large number of writes can be absorbed by the disk cache / TCQ in
+ * a short time, a fixed request count does not map to a predictable duration.
  */
-static unsigned long write_batch_expire = 5;
+static unsigned long write_batch_expire = HZ / 20;
 
 /*
  * max time we may wait to anticipate a read (default around 6ms)
@@ -131,6 +133,9 @@ struct as_data {
 	unsigned long last_check_fifo[2];
 	int changed_batch;
 	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
+	int write_batch_count;		/* max # of reqs in a write batch */
+	int current_write_count;	/* how many requests left this batch */
+	int write_batch_idled;		/* has the write batch gone idle? */
 	mempool_t *arq_pool;
 
 	enum anticipation_status antic_status;
@@ -913,6 +918,37 @@ static void as_update_arq(struct as_data
 }
 
 /*
+ * Gathers timings and resizes the write batch automatically.
+ *
+ * NOTE(review): write_time is clamped to no less than 0; if it is 0 when the
+ * "batch / write_time" branch is taken, this divides by zero — needs a guard.
+ */
+void update_write_batch(struct as_data *ad)
+{
+	unsigned long batch = ad->batch_expire[REQ_ASYNC];
+	long write_time;
+
+	write_time = (jiffies - ad->current_batch_expires) + batch;
+	if (write_time < 0)
+		write_time = 0;
+
+	if (write_time > batch + 5 && !ad->write_batch_idled) {
+		if (write_time / batch > 2)
+			ad->write_batch_count /= 2;
+		else
+			ad->write_batch_count--;
+		
+	} else if (write_time + 5 < batch && ad->current_write_count == 0) {
+		if (batch / write_time > 2)
+			ad->write_batch_count *= 2;
+		else
+			ad->write_batch_count++;
+	}
+
+	if (ad->write_batch_count < 1)
+		ad->write_batch_count = 1;
+}
+
+/*
  * as_completed_request is to be called when a request has completed and
  * returned something to the requesting process, be it an error or data.
  */
@@ -927,8 +961,7 @@ static void as_completed_request(request
 		return;
 	}
 
-	if (blk_fs_request(rq) && arq->state == AS_RQ_NEW)
-		printk(KERN_INFO "warning: as_completed_request got bad request\n");
+	WARN_ON(blk_fs_request(rq) && arq->state == AS_RQ_NEW);
 				
 	if (arq->state != AS_RQ_DISPATCHED)
 		return;
@@ -946,6 +979,7 @@ static void as_completed_request(request
 	 */
 	if (ad->batch_data_dir == REQ_SYNC && ad->changed_batch
 			&& ad->batch_data_dir == arq->is_sync) {
+		update_write_batch(ad);
 		ad->current_batch_expires = jiffies +
 				ad->batch_expire[REQ_SYNC];
 		ad->changed_batch = 0;
@@ -1090,10 +1124,11 @@ static inline int as_batch_expired(struc
 		return 0;
 
 	if (ad->batch_data_dir == REQ_SYNC)
-		return time_after(jiffies, ad->current_batch_expires) &&
-		 	time_after(jiffies, ad->fifo_expire[REQ_SYNC]);
+		/* TODO! add a check so a complete fifo gets written? */
+		return time_after(jiffies, ad->current_batch_expires);
 
-	return !ad->current_batch_expires;
+	return time_after(jiffies, ad->current_batch_expires)
+		|| ad->current_write_count == 0;
 }
 
 /*
@@ -1122,8 +1157,8 @@ static void as_move_to_dispatch(struct a
 		copy_as_io_context(&ad->as_io_context, &arq->as_io_context);
 	} else {
 		put_as_io_context(&ad->as_io_context);
-		if (ad->current_batch_expires)
-			ad->current_batch_expires--;
+		if (ad->current_write_count != 0)
+			ad->current_write_count--;
 	}
 	ad->aic_finished = 0;
 
@@ -1152,6 +1187,12 @@ static int as_dispatch_request(struct as
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
 	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
 
+	/* Signal that the write batch was uncontended, so we can't time it */
+	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
+		if (ad->current_write_count == 0 || !writes)
+			ad->write_batch_idled = 1;
+	}
+	
 	if (!(reads || writes)
 		|| ad->antic_status == ANTIC_WAIT_REQ
 		|| ad->antic_status == ANTIC_WAIT_NEXT
@@ -1216,7 +1257,8 @@ dispatch_writes:
  		if (ad->batch_data_dir == REQ_SYNC)
  			ad->changed_batch = 1;
 		ad->batch_data_dir = REQ_ASYNC;
-		ad->current_batch_expires = ad->batch_expire[REQ_ASYNC];
+		ad->current_write_count = ad->write_batch_count;
+		ad->write_batch_idled = 0;
 		arq = ad->next_arq[ad->batch_data_dir];
 		goto dispatch_request;
 	}
@@ -1238,9 +1280,11 @@ fifo_expired:
 	if (ad->changed_batch) {
 		if (ad->changed_batch == 1 && ad->nr_dispatched)
 			return 0;
-		if (ad->changed_batch == 1 && ad->batch_data_dir == REQ_ASYNC)
+		if (ad->batch_data_dir == REQ_ASYNC) {
+			ad->current_batch_expires = jiffies +
+					ad->batch_expire[REQ_ASYNC];
 			ad->changed_batch = 0;
-		else
+		} else
 			ad->changed_batch = 2;
 		arq->request->flags |= REQ_HARDBARRIER;
 	}
@@ -1635,6 +1679,9 @@ static int as_init(request_queue_t *q, e
 	e->elevator_data = ad;
 
 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
+	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
+	if (ad->write_batch_count < 2)
+		ad->write_batch_count = 2;
 	return 0;
 }
 

_