author		Josef Bacik <jbacik@fusionio.com>	2012-07-20 16:11:08 -0400
committer	Chris Mason <chris.mason@fusionio.com>	2012-07-23 16:28:09 -0400
commit		594831c4b232b094d645503ecedec2e35dcebdf3
tree		27867ddba8f36d9dc4946c5beee2f18d7be8c861
parent		e64860aa05048fa7a8483ca698b17c2caf5625cf
Btrfs: fix potential race in extent buffer freeing
This sounds sort of impossible, but it is the only thing I can think of, and
at the very least it is theoretically possible, so here it goes.

If we are in try_release_extent_buffer() we check that the ref count on the
extent buffer is 1 and that it is not under IO, and then go down and clear
the tree ref. If, between this check and clearing the tree ref, somebody
else comes in, grabs a ref on the eb and then marks it dirty before
try_release_extent_buffer() does its tree ref clear, we can end up with a
dirty eb that is freed while it is still dirty, which will result in a
panic. Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
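
To make the bad interleaving concrete, below is a minimal userspace sketch of
the ordering described above. It is not kernel code: the structure and names
(eb_model, releasable, and so on) are invented for illustration, and the two
tasks are stepped by hand in exactly the racy order, so the "freed while
dirty" outcome is deterministic.

/*
 * Userspace model only -- not kernel code.  The kernel objects are
 * struct extent_buffer, eb->refs, EXTENT_BUFFER_TREE_REF and
 * EXTENT_BUFFER_DIRTY; everything below is a stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct eb_model {
	int  refs;      /* stands in for atomic_t eb->refs              */
	bool tree_ref;  /* stands in for the EXTENT_BUFFER_TREE_REF bit */
	bool dirty;     /* stands in for the EXTENT_BUFFER_DIRTY bit    */
	bool under_io;  /* stands in for the "under IO" state           */
};

int main(void)
{
	/* An eb whose only remaining reference is the tree ref. */
	struct eb_model eb = {
		.refs = 1, .tree_ref = true, .dirty = false, .under_io = false,
	};

	/* Task A: try_release_extent_buffer() sees refs == 1 and no IO and
	 * decides the tree ref can be dropped... */
	bool releasable = (eb.refs == 1) && !eb.under_io;

	/* ...but before it acts on that decision, task B grabs a reference
	 * to the eb and marks it dirty. */
	eb.refs++;
	eb.dirty = true;

	/* Task A resumes with its stale decision, clears the tree ref and
	 * drops the reference that went with it. */
	if (releasable) {
		eb.tree_ref = false;
		eb.refs--;
	}

	/* When task B later drops its transient reference, refs hits zero
	 * and the eb is freed while it is still dirty. */
	eb.refs--;

	printf("refs=%d dirty=%s tree_ref=%s -> %s\n",
	       eb.refs,
	       eb.dirty ? "yes" : "no",
	       eb.tree_ref ? "yes" : "no",
	       (eb.refs == 0 && eb.dirty) ? "freed while dirty" : "ok");
	return 0;
}

The patch below closes this window by doing the TREE_REF test-and-set and the
accompanying ref bump under eb->refs_lock, so they can no longer slip in
between the release path's check and its tree ref clear.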
-rw-r--r--	fs/btrfs/extent_io.c	9
1 file changed, 3 insertions, 6 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e6243f78743..e1939a6c747 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4123,11 +4123,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * So bump the ref count first, then set the bit. If someone
 	 * beat us to it, drop the ref we added.
 	 */
-	if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+	spin_lock(&eb->refs_lock);
+	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_inc(&eb->refs);
-		if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-			atomic_dec(&eb->refs);
-	}
+	spin_unlock(&eb->refs_lock);
 }
 
 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
@@ -4239,9 +4238,7 @@ again:
 		goto free_eb;
 	}
 	/* add one reference for the tree */
-	spin_lock(&eb->refs_lock);
 	check_buffer_tree_ref(eb);
-	spin_unlock(&eb->refs_lock);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
 
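
For readability, this is roughly how check_buffer_tree_ref() reads with the
first hunk applied (reconstructed from the diff above; the function's leading
comment block is omitted and the inline comment is editorial, not part of the
patch):

static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	/* Set EXTENT_BUFFER_TREE_REF and take its reference under
	 * eb->refs_lock, so the release path cannot interleave between
	 * the test and the ref bump. */
	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);
	spin_unlock(&eb->refs_lock);
}

The second hunk then drops the caller's own refs_lock pair around
check_buffer_tree_ref(), since the function now takes the lock itself.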