From: Nick Piggin <piggin@cyberone.com.au>

Now that we are counting requests in use (rather than requests free), this
patch inverts the congested & batch watermarks so they read more logically.
It also fixes a bug in the sysfs code: queue_requests_store() checked the
READ congestion thresholds twice and never checked WRITE.



 drivers/block/ll_rw_blk.c |   45 ++++++++++++++++++++++-----------------------
 1 files changed, 22 insertions(+), 23 deletions(-)

diff -puN drivers/block/ll_rw_blk.c~blk-invert-watermarks drivers/block/ll_rw_blk.c
--- 25/drivers/block/ll_rw_blk.c~blk-invert-watermarks	2003-06-11 22:36:15.000000000 -0700
+++ 25-akpm/drivers/block/ll_rw_blk.c	2003-06-11 22:36:15.000000000 -0700
@@ -54,21 +54,23 @@ static wait_queue_head_t congestion_wqh[
 
 static inline int batch_requests(struct request_queue *q)
 {
-	return min(q->nr_requests / 8, 8UL);
+	return q->nr_requests - min(q->nr_requests / 8, 8UL);
 }
 
 /*
- * Return the threshold (number of free requests) at which the queue is
+ * Return the threshold (number of used requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
  * context switch rate down.
  */
 static inline int queue_congestion_on_threshold(struct request_queue *q)
 {
 	int ret;
+	
+	ret = q->nr_requests - (q->nr_requests / 8) + 1;
+	
+	if (ret > q->nr_requests)
+		ret = q->nr_requests;
 
-	ret = q->nr_requests / 8 - 1;
-	if (ret < 0)
-		ret = 1;
 	return ret;
 }
 
@@ -79,9 +81,11 @@ static inline int queue_congestion_off_t
 {
 	int ret;
 
-	ret = q->nr_requests / 8 + 1;
-	if (ret > q->nr_requests)
-		ret = q->nr_requests;
+	ret = q->nr_requests - (q->nr_requests / 8) - 1;
+
+	if (ret < 1)
+		ret = 1;
+
 	return ret;
 }
 
@@ -1326,7 +1330,7 @@ static struct request *get_request(reque
 		goto out;
 	}
 	rl->count[rw]++;
-	if ((q->nr_requests - rl->count[rw]) < queue_congestion_on_threshold(q))
+	if (rl->count[rw] >= queue_congestion_on_threshold(q))
 		set_queue_congested(q, rw);
 	spin_unlock_irq(q->queue_lock);
 
@@ -1334,7 +1338,7 @@ static struct request *get_request(reque
 	if (!rq) {
 		spin_lock_irq(q->queue_lock);
 		rl->count[rw]--;
-		if ((q->nr_requests - rl->count[rw]) >= queue_congestion_off_threshold(q))
+		if (rl->count[rw] < queue_congestion_off_threshold(q))
                         clear_queue_congested(q, rw);
 		spin_unlock_irq(q->queue_lock);
 		goto out;
@@ -1555,10 +1559,9 @@ void __blk_put_request(request_queue_t *
 		blk_free_request(q, req);
 
 		rl->count[rw]--;
-		if ((q->nr_requests - rl->count[rw]) >=
-				queue_congestion_off_threshold(q))
+		if (rl->count[rw] < queue_congestion_off_threshold(q))
 			clear_queue_congested(q, rw);
-		if ((q->nr_requests - rl->count[rw]) >= batch_requests(q) &&
+		if (rl->count[rw] < batch_requests(q) &&
 				waitqueue_active(&rl->wait[rw]))
 			wake_up(&rl->wait[rw]);
 	}
@@ -2412,19 +2415,15 @@ queue_requests_store(struct request_queu
 	if (q->nr_requests < BLKDEV_MIN_RQ)
 		q->nr_requests = BLKDEV_MIN_RQ;
 	
-	if ((q->nr_requests - rl->count[READ]) <
-				queue_congestion_on_threshold(q))
+	if (rl->count[READ] >= queue_congestion_on_threshold(q))
 		set_queue_congested(q, READ);
-	else if ((q->nr_requests - rl->count[READ]) >=
-				queue_congestion_off_threshold(q))
+	else if (rl->count[READ] < queue_congestion_off_threshold(q))
 		clear_queue_congested(q, READ);
 
-	if ((q->nr_requests - rl->count[READ]) <
-				queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
-	else if ((q->nr_requests - rl->count[READ]) >=
-				queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+		set_queue_congested(q, WRITE);
+	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+		clear_queue_congested(q, WRITE);
 	
 	return ret;
 }

_