Experimental conversion of the networking code's open-coded wait-queue loops
over to the prepare_to_wait()/finish_wait() API.  For people to test, to see
if this is worth putting more effort into.
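
The change is mechanical at every call site.  A minimal before/after sketch
of the idiom being replaced (schematic only -- "condition", "timeo" and the
sk->sleep queue stand in for whatever each real caller uses; the helpers come
from <linux/wait.h>):

    Old:

        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(sk->sleep, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                timeo = schedule_timeout(timeo);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sleep, &wait);

    New:

        DEFINE_WAIT(wait);

        for (;;) {
                /* Sets the task state and queues us on sk->sleep
                 * (only if we are not already on the queue). */
                prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                timeo = schedule_timeout(timeo);
        }
        /* Restores TASK_RUNNING and removes us from the queue. */
        finish_wait(sk->sleep, &wait);

Exclusive waiters (accept, datagram receive, __lock_sock, tcp_listen_wlock)
use prepare_to_wait_exclusive() in place of add_wait_queue_exclusive(); the
rest of the pattern is the same.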



 core/datagram.c |    9 +++------
 core/sock.c     |   17 +++++++----------
 ipv4/af_inet.c  |   10 ++++------
 ipv4/tcp.c      |   42 +++++++++++++++---------------------------
 ipv4/tcp_ipv4.c |    9 ++++-----
 unix/af_unix.c  |   17 ++++++-----------
 6 files changed, 39 insertions(+), 65 deletions(-)

diff -puN net/ipv4/tcp.c~tcp-wakeups net/ipv4/tcp.c
--- 25/net/ipv4/tcp.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/ipv4/tcp.c	2003-02-14 21:30:09.000000000 -0800
@@ -658,7 +658,7 @@ static int wait_for_tcp_connect(struct s
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DEFINE_WAIT(wait);
 
 	while ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 		if (sk->err)
@@ -670,16 +670,14 @@ static int wait_for_tcp_connect(struct s
 		if (signal_pending(tsk))
 			return sock_intr_errno(*timeo_p);
 
-		__set_task_state(tsk, TASK_INTERRUPTIBLE);
-		add_wait_queue(sk->sleep, &wait);
+		prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 		tp->write_pending++;
 
 		release_sock(sk);
 		*timeo_p = schedule_timeout(*timeo_p);
 		lock_sock(sk);
 
-		__set_task_state(tsk, TASK_RUNNING);
-		remove_wait_queue(sk->sleep, &wait);
+		finish_wait(sk->sleep, &wait);
 		tp->write_pending--;
 	}
 	return 0;
@@ -699,16 +697,15 @@ static int wait_for_tcp_memory(struct so
 	int err = 0;
 	long vm_wait = 0;
 	long current_timeo = *timeo;
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
 	if (tcp_memory_free(sk))
 		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
 
-	add_wait_queue(sk->sleep, &wait);
 	for (;;) {
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
 
-		set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 
 		if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
 			goto do_error;
@@ -739,8 +736,7 @@ static int wait_for_tcp_memory(struct so
 		*timeo = current_timeo;
 	}
 out:
-	current->state = TASK_RUNNING;
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	return err;
 
 do_error:
@@ -1372,11 +1368,9 @@ static void cleanup_rbuf(struct sock *sk
 
 static long tcp_data_wait(struct sock *sk, long timeo)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
-	add_wait_queue(sk->sleep, &wait);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 
 	set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
 	release_sock(sk);
@@ -1387,8 +1381,7 @@ static long tcp_data_wait(struct sock *s
 	lock_sock(sk);
 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
 
-	remove_wait_queue(sk->sleep, &wait);
-	__set_current_state(TASK_RUNNING);
+	finish_wait(sk->sleep, &wait);
 	return timeo;
 }
 
@@ -2014,12 +2007,10 @@ void tcp_close(struct sock *sk, long tim
 
 	if (timeout) {
 		struct task_struct *tsk = current;
-		DECLARE_WAITQUEUE(wait, current);
-
-		add_wait_queue(sk->sleep, &wait);
+		DEFINE_WAIT(wait);
 
 		do {
-			set_current_state(TASK_INTERRUPTIBLE);
+			prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 			if (!closing(sk))
 				break;
 			release_sock(sk);
@@ -2027,8 +2018,7 @@ void tcp_close(struct sock *sk, long tim
 			lock_sock(sk);
 		} while (!signal_pending(tsk) && timeout);
 
-		tsk->state = TASK_RUNNING;
-		remove_wait_queue(sk->sleep, &wait);
+		finish_wait(sk->sleep, &wait);
 	}
 
 adjudge_to_death:
@@ -2188,7 +2178,7 @@ int tcp_disconnect(struct sock *sk, int 
 static int wait_for_connect(struct sock *sk, long timeo)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	int err;
 
 	/*
@@ -2205,9 +2195,8 @@ static int wait_for_connect(struct sock 
 	 * our exclusiveness temporarily when we get woken up without
 	 * having to remove and re-insert us on the wait queue.
 	 */
-	add_wait_queue_exclusive(sk->sleep, &wait);
 	for (;;) {
-		current->state = TASK_INTERRUPTIBLE;
+		prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 		release_sock(sk);
 		if (!tp->accept_queue)
 			timeo = schedule_timeout(timeo);
@@ -2225,8 +2214,7 @@ static int wait_for_connect(struct sock 
 		if (!timeo)
 			break;
 	}
-	current->state = TASK_RUNNING;
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	return err;
 }
 
diff -puN net/ipv4/tcp_ipv4.c~tcp-wakeups net/ipv4/tcp_ipv4.c
--- 25/net/ipv4/tcp_ipv4.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/ipv4/tcp_ipv4.c	2003-02-14 21:30:09.000000000 -0800
@@ -328,11 +328,11 @@ void tcp_listen_wlock(void)
 	write_lock(&tcp_lhash_lock);
 
 	if (atomic_read(&tcp_lhash_users)) {
-		DECLARE_WAITQUEUE(wait, current);
+		DEFINE_WAIT(wait);
 
-		add_wait_queue_exclusive(&tcp_lhash_wait, &wait);
 		for (;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
+			prepare_to_wait_exclusive(&tcp_lhash_wait,
+						&wait, TASK_UNINTERRUPTIBLE);
 			if (!atomic_read(&tcp_lhash_users))
 				break;
 			write_unlock_bh(&tcp_lhash_lock);
@@ -340,8 +340,7 @@ void tcp_listen_wlock(void)
 			write_lock_bh(&tcp_lhash_lock);
 		}
 
-		__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&tcp_lhash_wait, &wait);
+		finish_wait(&tcp_lhash_wait, &wait);
 	}
 }
 
diff -puN net/ipv4/af_inet.c~tcp-wakeups net/ipv4/af_inet.c
--- 25/net/ipv4/af_inet.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/ipv4/af_inet.c	2003-02-14 21:30:09.000000000 -0800
@@ -561,10 +561,9 @@ int inet_dgram_connect(struct socket *so
 
 static long inet_wait_for_connect(struct sock *sk, long timeo)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(sk->sleep, &wait);
+	prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 
 	/* Basic assumption: if someone sets sk->err, he _must_
 	 * change state of the socket from TCP_SYN_*.
@@ -577,10 +576,9 @@ static long inet_wait_for_connect(struct
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	return timeo;
 }
 
diff -puN net/core/datagram.c~tcp-wakeups net/core/datagram.c
--- 25/net/core/datagram.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/core/datagram.c	2003-02-14 21:30:09.000000000 -0800
@@ -68,11 +68,9 @@ static inline int connection_based(struc
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
 	int error;
+	DEFINE_WAIT(wait);
 
-	DECLARE_WAITQUEUE(wait, current);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue_exclusive(sk->sleep, &wait);
+	prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 
 	/* Socket errors? */
 	error = sock_error(sk);
@@ -101,8 +99,7 @@ static int wait_for_packet(struct sock *
 	error = 0;
 	*timeo_p = schedule_timeout(*timeo_p);
 out:
-	current->state = TASK_RUNNING;
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	return error;
 interrupted:
 	error = sock_intr_errno(*timeo_p);
diff -puN net/core/sock.c~tcp-wakeups net/core/sock.c
--- 25/net/core/sock.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/core/sock.c	2003-02-14 21:30:09.000000000 -0800
@@ -740,17 +740,16 @@ void sock_kfree_s(struct sock *sk, void 
  */
 static long sock_wait_for_wmem(struct sock * sk, long timeo)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
-	add_wait_queue(sk->sleep, &wait);
 	for (;;) {
 		if (!timeo)
 			break;
 		if (signal_pending(current))
 			break;
 		set_bit(SOCK_NOSPACE, &sk->socket->flags);
-		set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 		if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
 			break;
 		if (sk->shutdown & SEND_SHUTDOWN)
@@ -759,8 +758,7 @@ static long sock_wait_for_wmem(struct so
 			break;
 		timeo = schedule_timeout(timeo);
 	}
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	return timeo;
 }
 
@@ -853,19 +851,18 @@ struct sk_buff *sock_alloc_send_skb(stru
 
 void __lock_sock(struct sock *sk)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
-	add_wait_queue_exclusive(&sk->lock.wq, &wait);
 	for(;;) {
-		current->state = TASK_UNINTERRUPTIBLE;
+		prepare_to_wait_exclusive(&sk->lock.wq, &wait,
+					TASK_UNINTERRUPTIBLE);
 		spin_unlock_bh(&sk->lock.slock);
 		schedule();
 		spin_lock_bh(&sk->lock.slock);
 		if(!sock_owned_by_user(sk))
 			break;
 	}
-	current->state = TASK_RUNNING;
-	remove_wait_queue(&sk->lock.wq, &wait);
+	finish_wait(&sk->lock.wq, &wait);
 }
 
 void __release_sock(struct sock *sk)
diff -puN net/unix/af_unix.c~tcp-wakeups net/unix/af_unix.c
--- 25/net/unix/af_unix.c~tcp-wakeups	2003-02-14 21:30:09.000000000 -0800
+++ 25-akpm/net/unix/af_unix.c	2003-02-14 21:30:09.000000000 -0800
@@ -858,10 +858,9 @@ static long unix_wait_for_peer(unix_sock
 {
 	struct unix_sock *u = unix_sk(other);
 	int sched;
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue_exclusive(&u->peer_wait, &wait);
+	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
 
 	sched = (!other->dead &&
 		 !(other->shutdown&RCV_SHUTDOWN) &&
@@ -872,8 +871,7 @@ static long unix_wait_for_peer(unix_sock
 	if (sched)
 		timeo = schedule_timeout(timeo);
 
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&u->peer_wait, &wait);
+	finish_wait(&u->peer_wait, &wait);
 	return timeo;
 }
 
@@ -1510,14 +1508,12 @@ out:
  
 static long unix_stream_data_wait(unix_socket * sk, long timeo)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 
 	unix_state_rlock(sk);
 
-	add_wait_queue(sk->sleep, &wait);
-
 	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
 
 		if (skb_queue_len(&sk->receive_queue) ||
 		    sk->err ||
@@ -1533,8 +1529,7 @@ static long unix_stream_data_wait(unix_s
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
 	}
 
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sleep, &wait);
+	finish_wait(sk->sleep, &wait);
 	unix_state_runlock(sk);
 	return timeo;
 }

_