Implement the designed locking around journal->j_tail.

journal->j_tail (and the fields written alongside it) is now protected
by journal->j_state_lock:

- cleanup_journal_tail() keeps j_state_lock held until j_tail and
  j_tail_sequence have been updated (or until it decides there is
  nothing to do), rather than dropping the lock before the update.

- journal_update_superblock() takes j_state_lock while copying j_tail,
  j_tail_sequence and j_errno into the superblock buffer.

- journal_flush() takes j_state_lock around the temporary clearing and
  restoration of j_tail, and around the final consistency assertions.
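A minimal sketch of the rule this enforces, assuming the usual jbd.h
declarations; jbd_sample_tail() is a hypothetical helper for
illustration, not something this patch adds:

	/*
	 * Illustrative only -- not part of this patch.  Any code that
	 * reads j_tail for use outside the lock must take a snapshot
	 * under j_state_lock:
	 */
	static unsigned long jbd_sample_tail(journal_t *journal)
	{
		unsigned long tail;

		spin_lock(&journal->j_state_lock);
		tail = journal->j_tail;	/* stable while lock is held */
		spin_unlock(&journal->j_state_lock);
		return tail;
	}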
 25-akpm/fs/jbd/checkpoint.c |    8 +++++---
 25-akpm/fs/jbd/journal.c    |   11 ++++++++---
 fs/jbd/commit.c             |    0 
 fs/jbd/recovery.c           |    0 
 4 files changed, 13 insertions(+), 6 deletions(-)
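
Note the ordering this imposes: journal_update_superblock() now takes
j_state_lock itself, so callers must drop the lock before calling it.
A condensed sketch of the pattern journal_flush() ends up with
(assertions and error handling omitted; see the hunk below for the
real code):

	spin_lock(&journal->j_state_lock);
	old_tail = journal->j_tail;
	journal->j_tail = 0;		/* mark the log empty on disk */
	spin_unlock(&journal->j_state_lock);	/* update_superblock relocks */
	journal_update_superblock(journal, 1);
	spin_lock(&journal->j_state_lock);
	journal->j_tail = old_tail;	/* restore the in-core value */
	spin_unlock(&journal->j_state_lock);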

diff -puN fs/jbd/checkpoint.c~jbd-200-j_tail-locking fs/jbd/checkpoint.c
--- 25/fs/jbd/checkpoint.c~jbd-200-j_tail-locking	Thu Jun  5 15:14:27 2003
+++ 25-akpm/fs/jbd/checkpoint.c	Thu Jun  5 15:14:27 2003
@@ -416,13 +416,14 @@ int cleanup_journal_tail(journal_t *jour
 		blocknr = journal->j_head;
 	}
 	spin_unlock(&journal->j_list_lock);
-	spin_unlock(&journal->j_state_lock);
-	J_ASSERT (blocknr != 0);
+	J_ASSERT(blocknr != 0);
 
 	/* If the oldest pinned transaction is at the tail of the log
            already then there's not much we can do right now. */
-	if (journal->j_tail_sequence == first_tid)
+	if (journal->j_tail_sequence == first_tid) {
+		spin_unlock(&journal->j_state_lock);
 		return 1;
+	}
 
 	/* OK, update the superblock to recover the freed space.
 	 * Physical blocks come first: have we wrapped beyond the end of
@@ -439,6 +440,7 @@ int cleanup_journal_tail(journal_t *jour
 	journal->j_free += freed;
 	journal->j_tail_sequence = first_tid;
 	journal->j_tail = blocknr;
+	spin_unlock(&journal->j_state_lock);
 	if (!(journal->j_flags & JFS_ABORT))
 		journal_update_superblock(journal, 1);
 	return 0;
diff -puN fs/jbd/commit.c~jbd-200-j_tail-locking fs/jbd/commit.c
diff -puN fs/jbd/journal.c~jbd-200-j_tail-locking fs/jbd/journal.c
--- 25/fs/jbd/journal.c~jbd-200-j_tail-locking	Thu Jun  5 15:14:27 2003
+++ 25-akpm/fs/jbd/journal.c	Thu Jun  5 15:14:27 2003
@@ -850,12 +850,14 @@ void journal_update_superblock(journal_t
 	journal_superblock_t *sb = journal->j_superblock;
 	struct buffer_head *bh = journal->j_sb_buffer;
 
+	spin_lock(&journal->j_state_lock);
 	jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
 		  journal->j_tail, journal->j_tail_sequence, journal->j_errno);
 
 	sb->s_sequence = htonl(journal->j_tail_sequence);
 	sb->s_start    = htonl(journal->j_tail);
 	sb->s_errno    = htonl(journal->j_errno);
+	spin_unlock(&journal->j_state_lock);
 
 	BUFFER_TRACE(bh, "marking dirty");
 	mark_buffer_dirty(bh);
@@ -1260,18 +1262,21 @@ int journal_flush(journal_t *journal)
 	 * the magic code for a fully-recovered superblock.  Any future
 	 * commits of data to the journal will restore the current
 	 * s_start value. */
+	spin_lock(&journal->j_state_lock);
 	old_tail = journal->j_tail;
 	journal->j_tail = 0;
+	spin_unlock(&journal->j_state_lock);
 	journal_update_superblock(journal, 1);
+	spin_lock(&journal->j_state_lock);
 	journal->j_tail = old_tail;
 
-	unlock_journal(journal);
-
 	J_ASSERT(!journal->j_running_transaction);
 	J_ASSERT(!journal->j_committing_transaction);
 	J_ASSERT(!journal->j_checkpoint_transactions);
 	J_ASSERT(journal->j_head == journal->j_tail);
 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+	spin_unlock(&journal->j_state_lock);
+	unlock_journal(journal);
 	
 	return err;
 }
@@ -1289,7 +1294,7 @@ int journal_flush(journal_t *journal)
  * we merely suppress recovery.
  */
 
-int journal_wipe (journal_t *journal, int write)
+int journal_wipe(journal_t *journal, int write)
 {
 	journal_superblock_t *sb;
 	int err = 0;
diff -puN fs/jbd/recovery.c~jbd-200-j_tail-locking fs/jbd/recovery.c

_