author    Josef Bacik <josef@redhat.com>  2011-04-11 20:20:11 -0400
committer Chris Mason <chris.mason@oracle.com>  2011-04-16 07:10:56 -0400
commit    6d74119f1a3efad9dc7f79a16c201242324b731f
tree      faf541835e82e9d56c7474c04b2d785821fe61d3
parent    0d399205edf3a4c290e76ebb36e541593af4a1b4
Btrfs: avoid taking the chunk_mutex in do_chunk_alloc
Every time we try to allocate disk space we check whether we can preemptively
allocate a chunk, but in the common case we don't allocate anything, so there
is no sense in taking the chunk_mutex at all. Instead, if we are allocating a
chunk, mark it in the space_info so we don't get two people trying to allocate
at the same time. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Reviewed-by: Liu Bo <liubo2009@cn.fujitsu.com>
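The idea, in pattern form: take only the cheap per-space_info spinlock on the
common path, elect at most one allocator via the new chunk_alloc flag, and let
everyone else use the chunk_mutex purely as a wait point before rechecking.
Below is a minimal userspace sketch of that scheme using pthreads; struct
space, should_alloc, and alloc_chunk are invented stand-ins for the btrfs
structures, not the kernel code itself. A two-thread demo follows the diff at
the bottom of this page.

#include <pthread.h>
#include <stdbool.h>

/*
 * Invented stand-in for btrfs_space_info: a cheap spinlock guards the
 * flags, while the heavy chunk_mutex is held across a whole allocation.
 */
struct space {
	pthread_spinlock_t lock;	/* plays space_info->lock */
	pthread_mutex_t chunk_mutex;	/* plays fs_info->chunk_mutex */
	bool full;			/* no more chunks can be allocated */
	bool chunk_alloc;		/* an allocation is in flight */
	int chunks;			/* toy stand-in for the real state */
};

/* Toy policy: a chunk is needed only while none has been allocated. */
static bool should_alloc(struct space *s)
{
	return s->chunks == 0;
}

/* Returns 1 if this caller allocated, 0 if no allocation was needed. */
static int alloc_chunk(struct space *s)
{
	bool wait_for_alloc = false;

again:
	pthread_spin_lock(&s->lock);
	if (s->full || !should_alloc(s)) {
		pthread_spin_unlock(&s->lock);
		return 0;		/* common case: mutex never touched */
	}
	if (s->chunk_alloc)
		wait_for_alloc = true;	/* someone else is allocating */
	else
		s->chunk_alloc = true;	/* we are elected to allocate */
	pthread_spin_unlock(&s->lock);

	pthread_mutex_lock(&s->chunk_mutex);
	if (wait_for_alloc) {
		/*
		 * The allocator holds chunk_mutex for its entire run, so
		 * acquiring it means the other guy is done; drop it and
		 * recheck whether we still need to allocate at all.
		 */
		pthread_mutex_unlock(&s->chunk_mutex);
		wait_for_alloc = false;
		goto again;
	}

	/* ... the real code would build the chunk here ... */

	pthread_spin_lock(&s->lock);
	s->chunks++;			/* publish the "allocation" */
	s->chunk_alloc = false;		/* let the next allocator in */
	pthread_spin_unlock(&s->lock);
	pthread_mutex_unlock(&s->chunk_mutex);
	return 1;
}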
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.h       |  4
-rw-r--r--  fs/btrfs/extent-tree.c | 30
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0d00a07b5b2..2e61fe1b6b8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
 	 */
 	unsigned long reservation_progress;
 
-	int full;		/* indicates that we cannot allocate any more
+	int full:1;		/* indicates that we cannot allocate any more
 				   chunks for this space */
+	int chunk_alloc:1;	/* set if we are allocating a chunk */
+
 	int force_alloc;	/* set if we need to force a chunk alloc for
 				   this space */
 
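A side note on this hunk: full shrinks from a plain int to a one-bit bitfield
so the new chunk_alloc bit can pack into the same word. Writes to bitfields
that share a word are read-modify-write operations on that word and are not
atomic with respect to each other; that is safe here only because both bits
are always accessed under space_info->lock, as the extent-tree.c changes below
show. A hedged illustration of that constraint (hypothetical names, a pthread
mutex standing in for the kernel spinlock):

#include <pthread.h>

/* Two flag bits packed into one word, as in btrfs_space_info. */
struct flags {
	pthread_mutex_t lock;	/* stands in for space_info->lock */
	int full:1;
	int chunk_alloc:1;
};

/*
 * Updating one bitfield rewrites the whole shared word, so every
 * update must take the same lock that protects its neighbours.
 */
static void set_chunk_alloc(struct flags *f, int v)
{
	pthread_mutex_lock(&f->lock);
	f->chunk_alloc = v;	/* cannot clobber f->full: lock is held */
	pthread_mutex_unlock(&f->lock);
}

Initialise such a struct with, e.g.,
struct flags f = { .lock = PTHREAD_MUTEX_INITIALIZER };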
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 26479484180..31f33ba56fe 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_may_use = 0;
 	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	found->chunk_alloc = 0;
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
 	atomic_set(&found->caching_threads, 0);
@@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
+	int wait_for_alloc = 0;
 	int ret = 0;
 
-	mutex_lock(&fs_info->chunk_mutex);
-
 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
 	space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	BUG_ON(!space_info);
 
+again:
 	spin_lock(&space_info->lock);
 	if (space_info->force_alloc)
 		force = space_info->force_alloc;
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
 	}
 
 	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
 		spin_unlock(&space_info->lock);
-		goto out;
+		return 0;
+	} else if (space_info->chunk_alloc) {
+		wait_for_alloc = 1;
+	} else {
+		space_info->chunk_alloc = 1;
 	}
 
 	spin_unlock(&space_info->lock);
 
+	mutex_lock(&fs_info->chunk_mutex);
+
+	/*
+	 * The chunk_mutex is held throughout the entirety of a chunk
+	 * allocation, so once we've acquired the chunk_mutex we know that the
+	 * other guy is done and we need to recheck and see if we should
+	 * allocate.
+	 */
+	if (wait_for_alloc) {
+		mutex_unlock(&fs_info->chunk_mutex);
+		wait_for_alloc = 0;
+		goto again;
+	}
+
 	/*
 	 * If we have mixed data/metadata chunks we want to make sure we keep
 	 * allocating mixed chunks instead of individual chunks.
@@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		space_info->full = 1;
 	else
 		ret = 1;
+
 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
-out:
 	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 	return ret;
 }
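To close the loop on the sketch after the commit message, here is a small
two-thread demo (again hypothetical, reusing that struct space and
alloc_chunk): both threads race into alloc_chunk, one is elected, the other
parks on chunk_mutex, rechecks once the allocator finishes, and finds the
work already done, so exactly one allocation happens.

#include <stdio.h>

static void *worker(void *arg)
{
	if (alloc_chunk(arg))
		puts("this thread performed the allocation");
	return NULL;
}

int main(void)
{
	struct space s = { .full = false, .chunk_alloc = false, .chunks = 0 };
	pthread_t a, b;

	pthread_spin_init(&s.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&s.chunk_mutex, NULL);

	pthread_create(&a, NULL, worker, &s);
	pthread_create(&b, NULL, worker, &s);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;	/* the message above prints exactly once */
}

Note that a waiter which loses the election may briefly loop through again:
before the elected allocator acquires chunk_mutex; that mirrors the patch's
behaviour, where waiters simply recheck rather than sleep on a condition.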