From: Suparna Bhattacharya, Hugh Dickins.

Actually lock the buffer: make lock_buffer_wq() loop on
test_set_buffer_locked() so it only returns 0 with the buffer lock actually
held (previously it waited once and returned without retrying the trylock),
and turn the plain primitives — wait_on_buffer(), lock_buffer(), lock_page(),
wait_on_page_locked(), wait_on_page_writeback() — into wrappers that call
their _wq counterparts with a NULL wait-queue entry (removing the now-redundant
wait_on_page_bit()).  Also queue AIO fput completion work on its own
"aio_fput" workqueue rather than sharing aio_wq, and make unuse_mm()
preempt-safe by bracketing enter_lazy_tlb() with get_cpu()/put_cpu() instead
of a bare smp_processor_id().


 fs/aio.c                    |    7 +++++--
 include/linux/buffer_head.h |   24 +++++++++++++-----------
 include/linux/pagemap.h     |   34 ++++++++++++++++------------------
 mm/filemap.c                |    6 ------
 4 files changed, 34 insertions(+), 37 deletions(-)

diff -puN fs/aio.c~lock_buffer_wq-fix fs/aio.c
--- 25/fs/aio.c~lock_buffer_wq-fix	2003-06-16 23:16:45.000000000 -0700
+++ 25-akpm/fs/aio.c	2003-06-16 23:16:45.000000000 -0700
@@ -53,6 +53,7 @@ static kmem_cache_t	*kiocb_cachep;
 static kmem_cache_t	*kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
+static struct workqueue_struct *aio_fput_wq;
 
 /* Used for rare fput completion. */
 static void aio_fput_routine(void *);
@@ -80,6 +81,7 @@ static int __init aio_setup(void)
 		panic("unable to create kioctx cache");
 
 	aio_wq = create_workqueue("aio");
+	aio_fput_wq = create_workqueue("aio_fput");
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
@@ -506,7 +508,7 @@ static int __aio_put_req(struct kioctx *
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);
-		queue_work(aio_wq, &fput_work);
+		queue_work(aio_fput_wq, &fput_work);
 	} else
 		really_put_req(ctx, req);
 	return 1;
@@ -582,7 +584,8 @@ void unuse_mm(struct mm_struct *mm)
 {
 	current->mm = NULL;
 	/* active_mm is still 'mm' */
-	enter_lazy_tlb(mm, current, smp_processor_id());
+	enter_lazy_tlb(mm, current, get_cpu());
+	put_cpu();
 }
 
 /*
diff -puN include/linux/buffer_head.h~lock_buffer_wq-fix include/linux/buffer_head.h
--- 25/include/linux/buffer_head.h~lock_buffer_wq-fix	2003-06-16 23:16:45.000000000 -0700
+++ 25-akpm/include/linux/buffer_head.h	2003-06-16 23:16:45.000000000 -0700
@@ -269,32 +269,34 @@ map_bh(struct buffer_head *bh, struct su
  * __wait_on_buffer() just to trip a debug check.  Because debug code in inline
  * functions is bloaty.
  */
-static inline void wait_on_buffer(struct buffer_head *bh)
-{
-	if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
-		__wait_on_buffer(bh);
-}
 
 static inline int wait_on_buffer_wq(struct buffer_head *bh, wait_queue_t *wait)
 {
-	if (buffer_locked(bh))
+	if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
 		return __wait_on_buffer_wq(bh, wait);
 
 	return 0;
 }
 
-static inline void lock_buffer(struct buffer_head *bh)
+static inline void wait_on_buffer(struct buffer_head *bh)
 {
-	while (test_set_buffer_locked(bh))
-		__wait_on_buffer(bh);
+	wait_on_buffer_wq(bh, NULL);
 }
 
 static inline int lock_buffer_wq(struct buffer_head *bh, wait_queue_t *wait)
 {
-	if (test_set_buffer_locked(bh))
-		return __wait_on_buffer_wq(bh, wait);
+	while (test_set_buffer_locked(bh)) {
+		int ret = __wait_on_buffer_wq(bh, wait);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
 
+static inline void lock_buffer(struct buffer_head *bh)
+{
+	lock_buffer_wq(bh, NULL);
+}
+
 #endif /* _LINUX_BUFFER_HEAD_H */
diff -puN include/linux/pagemap.h~lock_buffer_wq-fix include/linux/pagemap.h
--- 25/include/linux/pagemap.h~lock_buffer_wq-fix	2003-06-16 23:16:45.000000000 -0700
+++ 25-akpm/include/linux/pagemap.h	2003-06-16 23:19:13.000000000 -0700
@@ -130,11 +130,6 @@ static inline void ___add_to_page_cache(
 extern void FASTCALL(__lock_page(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
-static inline void lock_page(struct page *page)
-{
-	if (TestSetPageLocked(page))
-		__lock_page(page);
-}
 
 extern int FASTCALL(__lock_page_wq(struct page *page, wait_queue_t *wait));
 static inline int lock_page_wq(struct page *page, wait_queue_t *wait)
@@ -145,12 +140,17 @@ static inline int lock_page_wq(struct pa
 		return 0;
 }
 
+static inline void lock_page(struct page *page)
+{
+	lock_page_wq(page, NULL);
+}
 	
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
  */
-extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));
+extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr, 
+	wait_queue_t *wait));
 
 /* 
  * Wait for a page to be unlocked.
@@ -159,14 +159,7 @@ extern void FASTCALL(wait_on_page_bit(st
  * ie with increased "page->count" so that the page won't
  * go away during the wait..
  */
-static inline void wait_on_page_locked(struct page *page)
-{
-	if (PageLocked(page))
-		wait_on_page_bit(page, PG_locked);
-}
 
-extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr, 
-	wait_queue_t *wait));
 static inline int wait_on_page_locked_wq(struct page *page, wait_queue_t *wait)
 {
 	if (PageLocked(page))
@@ -174,14 +167,14 @@ static inline int wait_on_page_locked_wq
 	return 0;
 }
 
+static inline void wait_on_page_locked(struct page *page)
+{
+	wait_on_page_locked_wq(page, NULL);
+}
+
 /* 
  * Wait for a page to complete writeback
  */
-static inline void wait_on_page_writeback(struct page *page)
-{
-	if (PageWriteback(page))
-		wait_on_page_bit(page, PG_writeback);
-}
 
 static inline int wait_on_page_writeback_wq(struct page *page, 
 	wait_queue_t *wait)
@@ -191,6 +184,11 @@ static inline int wait_on_page_writeback
 	return 0;
 }
 
+static inline void wait_on_page_writeback(struct page *page)
+{
+	wait_on_page_writeback_wq(page, NULL);
+}
+
 extern void end_page_writeback(struct page *page);
 
 /*
diff -puN mm/filemap.c~lock_buffer_wq-fix mm/filemap.c
--- 25/mm/filemap.c~lock_buffer_wq-fix	2003-06-16 23:16:45.000000000 -0700
+++ 25-akpm/mm/filemap.c	2003-06-16 23:16:45.000000000 -0700
@@ -302,12 +302,6 @@ int wait_on_page_bit_wq(struct page *pag
 }
 EXPORT_SYMBOL(wait_on_page_bit_wq);
 
-void wait_on_page_bit(struct page *page, int bit_nr)
-{
-	wait_on_page_bit_wq(page, bit_nr, NULL);
-}
-EXPORT_SYMBOL(wait_on_page_bit);
-
 /**
  * unlock_page() - unlock a locked page
  *

_