author    Nathan Scott <nathans@sgi.com>  2006-09-28 11:02:09 +1000
committer Tim Shimmin <tes@sgi.com>       2006-09-28 11:02:09 +1000
commit    bb3c7d2936b6db6f5ded9abf4d215abe97af8372 (patch)
tree      0958e0102e703cceb42c4d4947227ac29642456e /fs/xfs
parent    2627509330323efc88b5818065cba737e000de5c (diff)
[XFS] Increase the size of the buffer holding the local inode cluster
list, to increase our potential readahead window and in turn improve
bulkstat performance.

SGI-PV: 944409
SGI-Modid: xfs-linux-melb:xfs-kern:26607a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
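The allocation strategy in the patch is: ask for a four-page buffer with a may-fail
allocation, halve the request each time the allocator cannot provide contiguous memory,
and once the size is down to a single page drop KM_MAYFAIL so the final request is
guaranteed to succeed. Below is a minimal user-space sketch of that pattern;
alloc_irec_buffer, PAGE_SIZE_BYTES and the use of malloc in place of kmem_alloc are
illustrative assumptions, not the kernel interfaces.

    #include <stdlib.h>
    #include <stddef.h>

    #define PAGE_SIZE_BYTES 4096    /* assumed page size; the kernel code uses NBPP */

    /*
     * Sketch of the fallback pattern: start with a four-page request,
     * halve it whenever a contiguous buffer cannot be obtained, and stop
     * shrinking at one page.  The kernel version instead drops KM_MAYFAIL
     * at that point so kmem_alloc blocks until the final, page-sized
     * request succeeds; malloc has no equivalent, so this sketch returns
     * NULL for that case.
     */
    static void *alloc_irec_buffer(size_t *sizep)
    {
        size_t size = 4 * PAGE_SIZE_BYTES;
        void *buf;

        for (;;) {
            buf = malloc(size);
            if (buf != NULL)
                break;
            if (size <= PAGE_SIZE_BYTES)
                return NULL;        /* kernel would sleep here instead of failing */
            size >>= 1;             /* drop back toward a single page */
        }
        *sizep = size;              /* caller needs the size actually obtained */
        return buf;
    }

A caller then sizes its record array from whatever was actually obtained, which is what
nirbuf = irbsize / sizeof(*irbuf) does in the hunk below.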
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_itable.c  22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index e6dbe6ba6fbd..315c9bcd3be3 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -324,6 +324,8 @@ xfs_bulkstat(
xfs_agino_t gino; /* current btree rec's start inode */
int i; /* loop index */
int icount; /* count of inodes good in irbuf */
+ int irbsize; /* size of irec buffer in bytes */
+ unsigned int kmflags; /* flags for allocating irec buffer */
xfs_ino_t ino; /* inode number (filesystem) */
xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
@@ -369,12 +371,20 @@ xfs_bulkstat(
nimask = ~(nicluster - 1);
nbcluster = nicluster >> mp->m_sb.sb_inopblog;
/*
- * Allocate a page-sized buffer for inode btree records.
- * We could try allocating something smaller, but for normal
- * calls we'll always (potentially) need the whole page.
+ * Allocate a local buffer for inode cluster btree records.
+ * This caps our maximum readahead window (so don't be stingy)
+ * but we must handle the case where we can't get a contiguous
+ * multi-page buffer, so we drop back toward pagesize; the end
+ * case we ensure succeeds, via appropriate allocation flags.
*/
- irbuf = kmem_alloc(NBPC, KM_SLEEP);
- nirbuf = NBPC / sizeof(*irbuf);
+ irbsize = NBPP * 4;
+ kmflags = KM_SLEEP | KM_MAYFAIL;
+ while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
+ if ((irbsize >>= 1) <= NBPP)
+ kmflags = KM_SLEEP;
+ }
+ nirbuf = irbsize / sizeof(*irbuf);
+
/*
* Loop over the allocation groups, starting from the last
* inode returned; 0 means start of the allocation group.
@@ -672,7 +682,7 @@ xfs_bulkstat(
/*
* Done, we're either out of filesystem or space to put the data.
*/
- kmem_free(irbuf, NBPC);
+ kmem_free(irbuf, irbsize);
*ubcountp = ubelem;
if (agno >= mp->m_sb.sb_agcount) {
/*
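For a rough sense of why the larger buffer widens the readahead window: xfs_bulkstat
fills this buffer with in-core inode btree records, and each record describes one
64-inode chunk, so the record count bounds how many inodes can be processed (and have
cluster readahead issued) per pass over the btree. The arithmetic below uses assumed
figures, a 4 KiB page and a 16-byte in-core record; the real sizes depend on the
platform and structure layout.

    #include <stdio.h>

    #define PAGE_BYTES      4096    /* assumed page size (NBPP here) */
    #define IREC_BYTES      16      /* assumed sizeof(xfs_inobt_rec_incore_t) */
    #define INODES_PER_REC  64      /* each inobt record covers a 64-inode chunk */

    int main(void)
    {
        int pages[] = { 1, 4 };     /* old single-page buffer vs. new NBPP * 4 */

        for (int i = 0; i < 2; i++) {
            int bytes = pages[i] * PAGE_BYTES;
            int nrecs = bytes / IREC_BYTES;     /* corresponds to nirbuf */
            printf("%d page(s): %4d records, up to %5d inodes per btree pass\n",
                   pages[i], nrecs, nrecs * INODES_PER_REC);
        }
        return 0;
    }

With these assumed sizes the change grows the per-pass batch from 256 to 1024 records,
which is the wider readahead window the commit message refers to.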