ppc64: replace __down with __down_wq, an async-capable semaphore down that lets aio writes return -EIOCBRETRY instead of sleeping



 25-akpm/arch/ppc64/kernel/ppc_ksyms.c |    2 +-
 25-akpm/arch/ppc64/kernel/semaphore.c |   24 +++++++++++++++++++-----
 25-akpm/include/asm-ppc64/semaphore.h |   14 +++++++++++---
 3 files changed, 31 insertions(+), 9 deletions(-)

diff -puN arch/ppc64/kernel/ppc_ksyms.c~aio-10-down_wq-ppc64 arch/ppc64/kernel/ppc_ksyms.c
--- 25/arch/ppc64/kernel/ppc_ksyms.c~aio-10-down_wq-ppc64	Fri May 16 16:29:47 2003
+++ 25-akpm/arch/ppc64/kernel/ppc_ksyms.c	Fri May 16 16:29:47 2003
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(strncmp);
 EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(naca);
-EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_wq);	/* async-capable replacement for __down; down() wraps it */
 
 /* EXPORT_SYMBOL(csum_partial); already in net/netsyms.c */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff -puN arch/ppc64/kernel/semaphore.c~aio-10-down_wq-ppc64 arch/ppc64/kernel/semaphore.c
--- 25/arch/ppc64/kernel/semaphore.c~aio-10-down_wq-ppc64	Fri May 16 16:29:47 2003
+++ 25-akpm/arch/ppc64/kernel/semaphore.c	Fri May 16 16:29:47 2003
@@ -70,13 +70,18 @@ void __up(struct semaphore *sem)
  * Thus it is only when we decrement count from some value > 0
  * that we have actually got the semaphore.
  */
-void __down(struct semaphore *sem)
+int __down_wq(struct semaphore *sem, wait_queue_t *wait)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(local_wait, tsk);
+	unsigned long flags;
 
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	/* Only synchronous waiters sleep; async (iocb) waiters must not
+	 * have their task state touched, since they return -EIOCBRETRY
+	 * below instead of calling schedule(). */
+	if (is_sync_wait(wait))
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	if (!wait)
+		wait = &local_wait;
+
+	add_wait_queue_exclusive(&sem->wait, wait);
 	smp_wmb();
 
 	/*
@@ -86,10 +91,17 @@ void __down(struct semaphore *sem)
 	 * that we are asleep, and then sleep.
 	 */
 	while (__sem_update_count(sem, -1) <= 0) {
+		if (!is_sync_wait(wait))
+			return -EIOCBRETRY;
 		schedule();
 		tsk->state = TASK_UNINTERRUPTIBLE;
 	}
-	remove_wait_queue(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (is_sync_wait(wait) || !list_empty(&wait->task_list)) {
+		remove_wait_queue_locked(&sem->wait, wait);
+		INIT_LIST_HEAD(&wait->task_list);
+	}
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
 
 	/*
@@ -98,6 +110,8 @@ void __down(struct semaphore *sem)
 	 * indicating that there are still processes sleeping.
 	 */
 	wake_up(&sem->wait);
+
+	return 0;
 }
 
 int __down_interruptible(struct semaphore * sem)
diff -puN include/asm-ppc64/semaphore.h~aio-10-down_wq-ppc64 include/asm-ppc64/semaphore.h
--- 25/include/asm-ppc64/semaphore.h~aio-10-down_wq-ppc64	Fri May 16 16:29:47 2003
+++ 25-akpm/include/asm-ppc64/semaphore.h	Fri May 16 16:29:47 2003
@@ -68,12 +68,14 @@ static inline void init_MUTEX_LOCKED (st
 	sema_init(sem, 0);
 }
 
-extern void __down(struct semaphore * sem);
+extern int  __down_wq(struct semaphore * sem, wait_queue_t *wait);
 extern int  __down_interruptible(struct semaphore * sem);
 extern void __up(struct semaphore * sem);
 
-static inline void down(struct semaphore * sem)
+static inline int down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
+	int ret = 0;
+
 #ifdef WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
@@ -83,8 +85,14 @@ static inline void down(struct semaphore
 	 * Try to get the semaphore, take the slow path if we fail.
 	 */
 	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
+		ret = __down_wq(sem, wait);
 	smp_wmb();
+	return ret;
+}
+
+static inline void down(struct semaphore * sem)
+{
+	down_wq(sem, NULL);
 }
 
 static inline int down_interruptible(struct semaphore * sem)

_