fs/ext2/balloc.c |   63 ++++++++++++++++++++++++++++---------------------------
 1 files changed, 33 insertions(+), 30 deletions(-)

diff -puN fs/ext2/balloc.c~ext2-no-lock-super-whitespace-fixes fs/ext2/balloc.c
--- 25/fs/ext2/balloc.c~ext2-no-lock-super-whitespace-fixes	2003-03-13 23:44:59.000000000 -0800
+++ 25-akpm/fs/ext2/balloc.c	2003-03-13 23:48:08.000000000 -0800
@@ -94,15 +94,16 @@ error_out:
 	return bh;
 }
 
-static inline int group_reserve_blocks(struct ext2_sb_info *sbi, struct ext2_bg_info *bgi, 
-					struct ext2_group_desc *desc,
-					struct buffer_head *bh, int count, int use_reserve)
+static inline int
+group_reserve_blocks(struct ext2_sb_info *sbi, struct ext2_bg_info *bgi,
+		struct ext2_group_desc *desc, struct buffer_head *bh,
+		int count, int use_reserve)
 {
 	unsigned free_blocks;
 	unsigned root_blocks;
 
 	spin_lock(&bgi->balloc_lock);
-	
+
 	free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
 	if (free_blocks < count)
 		count = free_blocks;
@@ -113,23 +114,24 @@ static inline int group_reserve_blocks(s
 		spin_unlock(&bgi->balloc_lock);
 		return 0;
 	}
-	
-        if (free_blocks <  bgi->reserved + count && !capable(CAP_SYS_RESOURCE) &&
-            sbi->s_resuid != current->fsuid &&
-            (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
-                /*
-                 * We are too close to reserve and we are not privileged.
-                 * Can we allocate anything at all?
-                 */
-                if (free_blocks > bgi->reserved)
-                        count = free_blocks - bgi->reserved;
-                else {
+
+	if (free_blocks < bgi->reserved + count &&
+			!capable(CAP_SYS_RESOURCE) &&
+			sbi->s_resuid != current->fsuid &&
+			(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
+		/*
+		 * We are too close to reserve and we are not privileged.
+		 * Can we allocate anything at all?
+		 */
+		if (free_blocks > bgi->reserved) {
+			count = free_blocks - bgi->reserved;
+		} else {
 			spin_unlock(&bgi->balloc_lock);
-                        return 0;
+			return 0;
 		}
 	}
 	desc->bg_free_blocks_count = cpu_to_le16(free_blocks - count);
-	
+
 	spin_unlock(&bgi->balloc_lock);
 
 	mark_buffer_dirty(bh);
@@ -142,14 +144,14 @@ static inline void group_release_blocks(
 {
 	if (count) {
 		unsigned free_blocks;
-		
+
 		spin_lock(&bgi->balloc_lock);
-		
+
 		free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
 		desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
-		
+
 		spin_unlock(&bgi->balloc_lock);
-		
+
 		mark_buffer_dirty(bh);
 	}
 }
@@ -303,15 +305,15 @@ got_it:
  * bitmap, and then for any free bit if that fails.
  * This function also updates quota and i_blocks field.
  */
-int ext2_new_block (struct inode * inode, unsigned long goal,
-    u32 * prealloc_count, u32 * prealloc_block, int * err)
+int ext2_new_block(struct inode *inode, unsigned long goal,
+			u32 *prealloc_count, u32 *prealloc_block, int *err)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gdp_bh;	/* bh2 */
 	struct ext2_group_desc *desc;
 	int group_no;			/* i */
 	int ret_block;			/* j */
-	int bit;		/* k */
+	int bit;			/* k */
 	int target_block;		/* tmp */
 	int block = 0, use_reserve = 0;
 	struct super_block *sb = inode->i_sb;
@@ -364,7 +366,8 @@ int ext2_new_block (struct inode * inode
 				group_size, ret_block);
 		if (ret_block >= 0)
 			goto got_block;
-		group_release_blocks(&sbi->s_bgi[group_no], desc, gdp_bh, group_alloc);
+		group_release_blocks(&sbi->s_bgi[group_no], desc,
+					gdp_bh, group_alloc);
 		group_alloc = 0;
 	}
 
@@ -384,7 +387,7 @@ repeat:
 		if (!desc)
 			goto io_error;
 		group_alloc = group_reserve_blocks(sbi, &sbi->s_bgi[group_no],
-						desc, gdp_bh, es_alloc, use_reserve);
+					desc, gdp_bh, es_alloc, use_reserve);
 	}
 	if (!use_reserve) {
 		/* first time we did not try to allocate
@@ -450,7 +453,8 @@ got_block:
 
 		for (n = 0; n < group_alloc && ++ret_block < group_size; n++) {
 			if (ext2_set_bit_atomic(&sbi->s_bgi[group_no].balloc_lock,
-						ret_block, (void*) bitmap_bh->b_data))
+						ret_block,
+						(void*) bitmap_bh->b_data))
  				break;
 		}
 		*prealloc_block = block + 1;
@@ -524,9 +528,8 @@ unsigned long ext2_count_free_blocks (st
 #endif
 }
 
-static inline int block_in_use (unsigned long block,
-				struct super_block * sb,
-				unsigned char * map)
+static inline int
+block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
 {
 	return ext2_test_bit ((block - le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block)) %
 			 EXT2_BLOCKS_PER_GROUP(sb), map);

_