author	Josef Bacik <jbacik@redhat.com>	2008-10-01 19:11:18 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-10-01 19:11:18 -0400
commit	cf749823857230017c86504bfdc70524f929ba96 (patch)
tree	bea09bb1df8954b1ed1629ea4269bc596ad83ef6 /fs/btrfs
parent	83afeac42c5680b0b70d64fb8c4724cf05483fc2 (diff)
Btrfs: fix deadlock between alloc_mutex/chunk_mutex
This fixes a deadlock between the alloc_mutex and chunk_mutex. Process A comes in and decides to do a chunk allocation in do_chunk_alloc(), which takes the chunk_mutex; it is already holding the alloc_mutex, because the only way to get to do_chunk_alloc() is by holding the alloc_mutex. btrfs_alloc_chunk() does its thing and goes to insert a new item, which results in a COW of the block. From there we get into del_pending_extents(), where, if we need to be rescheduled, we drop the alloc_mutex and schedule. At this point process B comes in to do an allocation and gets the alloc_mutex, and because process A did not complete its chunk allocation, process B thinks it's a good time to do a chunk allocation as well and hangs on the chunk_mutex. Process A wakes up, tries to take the alloc_mutex, and cannot.

The way to fix this is to do a mutex_trylock() on the chunk_mutex. If it returns 0 we didn't get the lock, and if this was just a "hey, it may be a good time to allocate a chunk" attempt then we just exit. If we are trying to force an allocation, we reschedule and keep trying to acquire the chunk_mutex. If, once we acquire it, the space is already full, we can just exit; otherwise we can continue with the chunk allocation.

Thank you,

Signed-off-by: Josef Bacik <jbacik@redhat.com>
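For illustration, here is a minimal userspace sketch of the same trylock-and-back-off idea using POSIX threads rather than kernel mutexes. The function chunk_alloc_locked(), the space_full flag, and main() are hypothetical stand-ins for the patch's logic, not btrfs code, and pthread_mutex_trylock() has the opposite return convention from the kernel's mutex_trylock():

/*
 * Simplified userspace illustration of the trylock pattern in the patch:
 * instead of blocking on chunk_mutex while still holding alloc_mutex
 * (which can deadlock against a task taking the locks the other way),
 * try the lock; on failure either give up (non-forced case) or drop
 * alloc_mutex, yield, and retry (forced case). All names here are
 * stand-ins for illustration only. Build with -lpthread.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int space_full;			/* stand-in for space_info->full */

/* Caller holds alloc_mutex, as the caller of do_chunk_alloc() does. */
static int chunk_alloc_locked(int force)
{
	int waited = 0;

	/* pthread_mutex_trylock() returns 0 on success, unlike the
	 * kernel's mutex_trylock(), hence the "!= 0" test. */
	while (pthread_mutex_trylock(&chunk_mutex) != 0) {
		if (!force)
			return 0;	/* opportunistic attempt: just skip it */
		pthread_mutex_unlock(&alloc_mutex);
		sched_yield();		/* analogue of cond_resched() */
		pthread_mutex_lock(&alloc_mutex);
		waited = 1;
	}

	if (waited && space_full) {
		/* someone else allocated a chunk while we were waiting */
		pthread_mutex_unlock(&chunk_mutex);
		return 0;
	}

	printf("allocating a chunk\n");	/* placeholder for btrfs_alloc_chunk() */
	pthread_mutex_unlock(&chunk_mutex);
	return 1;
}

int main(void)
{
	/* Single-threaded demo; the real deadlock needs two tasks
	 * contending on the two mutexes in opposite order. */
	pthread_mutex_lock(&alloc_mutex);
	chunk_alloc_locked(1);		/* forced allocation attempt */
	pthread_mutex_unlock(&alloc_mutex);
	return 0;
}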
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/extent-tree.c | 15
1 file changed, 13 insertions, 2 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 677d5e774fad..db37b867e4f1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1505,7 +1505,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	u64 thresh;
 	u64 start;
 	u64 num_bytes;
-	int ret = 0;
+	int ret = 0, waited = 0;
 
 	flags = reduce_alloc_profile(extent_root, flags);
 
@@ -1530,7 +1530,18 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	    space_info->bytes_reserved + alloc_bytes) < thresh)
 		goto out;
 
-	mutex_lock(&extent_root->fs_info->chunk_mutex);
+	while (!mutex_trylock(&extent_root->fs_info->chunk_mutex)) {
+		if (!force)
+			goto out;
+		mutex_unlock(&extent_root->fs_info->alloc_mutex);
+		cond_resched();
+		mutex_lock(&extent_root->fs_info->alloc_mutex);
+		waited = 1;
+	}
+
+	if (waited && space_info->full)
+		goto out_unlock;
+
 	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
 	if (ret == -ENOSPC) {
 		printk("space info full %Lu\n", flags);