From: Nick Piggin <piggin@cyberone.com.au>

The batch_requests() function got lost during the merge of the dynamic
request allocation patch.

We need it for the anticipatory scheduler: when the number of threads
exceeds the number of requests, the anticipated-upon task will undesirably
sleep in get_request_wait().

And apparently some block devices which use small requests need it so that
they can string a decent number of them together.

Jens has acked this patch.
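
For anyone who wants to play with the batching idea outside the kernel,
here is a minimal userspace sketch using pthreads in place of the kernel
waitqueue machinery.  It is purely illustrative: the names (rq_free, BATCH,
put_request, waiter) and the condition-variable plumbing are invented here,
not taken from the patch; only the BATCH formula mirrors batch_requests().
It also assumes the default BLKDEV_MAX_RQ of 128, which makes
batch_requests() return min(128 / 8, 8) = 8, so a sleeper is only woken
once eight requests have been freed.

	/*
	 * Userspace analogue of the batched wakeup below -- a sketch, not
	 * kernel code.  A releaser only wakes a waiter once a decent batch
	 * of requests has been freed, so the woken task can submit several
	 * requests before the pool empties again.
	 */
	#include <pthread.h>
	#include <stdio.h>

	#define MAX_RQ	128			/* assumes BLKDEV_MAX_RQ == 128 */
	#define BATCH	(MAX_RQ / 8 < 8 ? MAX_RQ / 8 : 8)  /* min(MAX_RQ / 8, 8) */

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t batch_ready = PTHREAD_COND_INITIALIZER;
	static int rq_free;		/* free requests; 0 == pool exhausted */

	/* analogue of __blk_put_request(): free one request, wake at batch size */
	static void put_request(void)
	{
		pthread_mutex_lock(&lock);
		rq_free++;
		if (rq_free >= BATCH)	/* only wake once a whole batch is free */
			pthread_cond_signal(&batch_ready);
		pthread_mutex_unlock(&lock);
	}

	/* analogue of get_request_wait(): sleep until woken, then allocate */
	static void *waiter(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (rq_free == 0)
			pthread_cond_wait(&batch_ready, &lock);
		rq_free--;		/* take one request from the pool */
		printf("woke with %d requests free\n", rq_free + 1);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int i;

		pthread_create(&t, NULL, waiter, NULL);
		for (i = 0; i < BATCH; i++)	/* waiter sleeps until here */
			put_request();
		pthread_join(t, NULL);
		return 0;
	}

The kernel version in the get_request_wait() hunk below does one extra
thing the sketch does not need: after going onto the waitqueue it retries
get_request() once, to close the race where every request is returned
before the sleeper is queued and the wakeup is lost.
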


 drivers/block/ll_rw_blk.c |   34 ++++++++++++++++++++++++++++++----
 include/linux/blkdev.h    |    1 +
 2 files changed, 31 insertions(+), 4 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~resurrect-batch_requests drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~resurrect-batch_requests	2003-06-14 14:52:10.000000000 -0700
+++ 25-akpm/drivers/block/ll_rw_blk.c	2003-06-14 14:52:12.000000000 -0700
@@ -53,6 +53,11 @@ int blk_nohighio = 0;
 
 static wait_queue_head_t congestion_wqh[2];
 
+static inline int batch_requests(void)
+{
+	return min(BLKDEV_MAX_RQ / 8, 8);
+}
+
 /*
  * Return the threshold (number of free requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
@@ -1182,6 +1187,8 @@ static int blk_init_free_list(request_qu
 	struct request_list *rl = &q->rq;
 
 	rl->count[READ] = rl->count[WRITE] = 0;
+	init_waitqueue_head(&rl->wait[READ]);
+	init_waitqueue_head(&rl->wait[WRITE]);
 
 	rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
 
@@ -1327,18 +1334,33 @@ out:
 }
 
 /*
- * No available requests for this queue, unplug the device.
+ * No available requests for this queue, unplug the device and wait for some
+ * requests to become available.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw)
 {
+	DEFINE_WAIT(wait);
 	struct request *rq;
 
 	generic_unplug_device(q);
 	do {
 		rq = get_request(q, rw, GFP_NOIO);
 
-		if (!rq)
-			blk_congestion_wait(rw, HZ / 50);
+		if (!rq) {
+			struct request_list *rl = &q->rq;
+
+			prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+						TASK_UNINTERRUPTIBLE);
+			/*
+			 * If _all_ the requests were suddenly returned then
+			 * no wakeup will be delivered.  So now we're on the
+			 * waitqueue, go check for that.
+			 */
+			rq = get_request(q, rw, GFP_ATOMIC & ~__GFP_HIGH);
+			if (!rq)
+				io_schedule();
+			finish_wait(&rl->wait[rw], &wait);
+		}
 	} while (!rq);
 
 	return rq;
@@ -1500,8 +1522,12 @@ void __blk_put_request(request_queue_t *
 		blk_free_request(q, req);
 
 		rl->count[rw]--;
-		if ((BLKDEV_MAX_RQ - rl->count[rw]) >= queue_congestion_off_threshold())
+		if ((BLKDEV_MAX_RQ - rl->count[rw]) >=
+				queue_congestion_off_threshold())
 			clear_queue_congested(q, rw);
+		if ((BLKDEV_MAX_RQ - rl->count[rw]) >= batch_requests() &&
+				waitqueue_active(&rl->wait[rw]))
+			wake_up(&rl->wait[rw]);
 	}
 }
 
diff -puN include/linux/blkdev.h~resurrect-batch_requests include/linux/blkdev.h
--- 25/include/linux/blkdev.h~resurrect-batch_requests	2003-06-14 14:52:10.000000000 -0700
+++ 25-akpm/include/linux/blkdev.h	2003-06-14 14:52:12.000000000 -0700
@@ -27,6 +27,7 @@ struct request_pm_state;
 struct request_list {
 	int count[2];
 	mempool_t *rq_pool;
+	wait_queue_head_t wait[2];
 };
 
 /*

_