Commit 26275093 authored by Nathan Scott, committed by Tim Shimmin

[XFS] Drop unneeded endian conversion in bulkstat and start readahead for

batches of inode cluster buffers at once, before any blocking reads are
issued.

SGI-PV: 944409
SGI-Modid: xfs-linux-melb:xfs-kern:26606a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 51bdd706
...@@ -325,9 +325,9 @@ xfs_bulkstat( ...@@ -325,9 +325,9 @@ xfs_bulkstat(
int i; /* loop index */ int i; /* loop index */
int icount; /* count of inodes good in irbuf */ int icount; /* count of inodes good in irbuf */
xfs_ino_t ino; /* inode number (filesystem) */ xfs_ino_t ino; /* inode number (filesystem) */
xfs_inobt_rec_t *irbp; /* current irec buffer pointer */ xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
xfs_inobt_rec_t *irbuf; /* start of irec buffer */ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
xfs_inobt_rec_t *irbufend; /* end of good irec buffer entries */ xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
xfs_ino_t lastino=0; /* last inode number returned */ xfs_ino_t lastino=0; /* last inode number returned */
int nbcluster; /* # of blocks in a cluster */ int nbcluster; /* # of blocks in a cluster */
int nicluster; /* # of inodes in a cluster */ int nicluster; /* # of inodes in a cluster */
...@@ -398,7 +398,7 @@ xfs_bulkstat( ...@@ -398,7 +398,7 @@ xfs_bulkstat(
* Allocate and initialize a btree cursor for ialloc btree. * Allocate and initialize a btree cursor for ialloc btree.
*/ */
cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO, cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
(xfs_inode_t *)0, 0); (xfs_inode_t *)0, 0);
irbp = irbuf; irbp = irbuf;
irbufend = irbuf + nirbuf; irbufend = irbuf + nirbuf;
end_of_ag = 0; end_of_ag = 0;
...@@ -435,9 +435,9 @@ xfs_bulkstat( ...@@ -435,9 +435,9 @@ xfs_bulkstat(
gcnt++; gcnt++;
} }
gfree |= XFS_INOBT_MASKN(0, chunkidx); gfree |= XFS_INOBT_MASKN(0, chunkidx);
irbp->ir_startino = cpu_to_be32(gino); irbp->ir_startino = gino;
irbp->ir_freecount = cpu_to_be32(gcnt); irbp->ir_freecount = gcnt;
irbp->ir_free = cpu_to_be64(gfree); irbp->ir_free = gfree;
irbp++; irbp++;
agino = gino + XFS_INODES_PER_CHUNK; agino = gino + XFS_INODES_PER_CHUNK;
icount = XFS_INODES_PER_CHUNK - gcnt; icount = XFS_INODES_PER_CHUNK - gcnt;
...@@ -491,11 +491,27 @@ xfs_bulkstat( ...@@ -491,11 +491,27 @@ xfs_bulkstat(
} }
/* /*
* If this chunk has any allocated inodes, save it. * If this chunk has any allocated inodes, save it.
* Also start read-ahead now for this chunk.
*/ */
if (gcnt < XFS_INODES_PER_CHUNK) { if (gcnt < XFS_INODES_PER_CHUNK) {
irbp->ir_startino = cpu_to_be32(gino); /*
irbp->ir_freecount = cpu_to_be32(gcnt); * Loop over all clusters in the next chunk.
irbp->ir_free = cpu_to_be64(gfree); * Do a readahead if there are any allocated
* inodes in that cluster.
*/
for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
chunkidx = 0;
chunkidx < XFS_INODES_PER_CHUNK;
chunkidx += nicluster,
agbno += nbcluster) {
if (XFS_INOBT_MASKN(chunkidx,
nicluster) & ~gfree)
xfs_btree_reada_bufs(mp, agno,
agbno, nbcluster);
}
irbp->ir_startino = gino;
irbp->ir_freecount = gcnt;
irbp->ir_free = gfree;
irbp++; irbp++;
icount += XFS_INODES_PER_CHUNK - gcnt; icount += XFS_INODES_PER_CHUNK - gcnt;
} }
...@@ -518,34 +534,12 @@ xfs_bulkstat( ...@@ -518,34 +534,12 @@ xfs_bulkstat(
irbufend = irbp; irbufend = irbp;
for (irbp = irbuf; for (irbp = irbuf;
irbp < irbufend && ubleft >= statstruct_size; irbp++) { irbp < irbufend && ubleft >= statstruct_size; irbp++) {
/*
* Read-ahead the next chunk's worth of inodes.
*/
if (&irbp[1] < irbufend) {
/*
* Loop over all clusters in the next chunk.
* Do a readahead if there are any allocated
* inodes in that cluster.
*/
for (agbno = XFS_AGINO_TO_AGBNO(mp,
be32_to_cpu(irbp[1].ir_startino)),
chunkidx = 0;
chunkidx < XFS_INODES_PER_CHUNK;
chunkidx += nicluster,
agbno += nbcluster) {
if (XFS_INOBT_MASKN(chunkidx,
nicluster) &
~(be64_to_cpu(irbp[1].ir_free)))
xfs_btree_reada_bufs(mp, agno,
agbno, nbcluster);
}
}
/* /*
* Now process this chunk of inodes. * Now process this chunk of inodes.
*/ */
for (agino = be32_to_cpu(irbp->ir_startino), chunkidx = 0, clustidx = 0; for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
ubleft > 0 && ubleft > 0 &&
be32_to_cpu(irbp->ir_freecount) < XFS_INODES_PER_CHUNK; irbp->ir_freecount < XFS_INODES_PER_CHUNK;
chunkidx++, clustidx++, agino++) { chunkidx++, clustidx++, agino++) {
ASSERT(chunkidx < XFS_INODES_PER_CHUNK); ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
/* /*
...@@ -565,7 +559,7 @@ xfs_bulkstat( ...@@ -565,7 +559,7 @@ xfs_bulkstat(
*/ */
if ((chunkidx & (nicluster - 1)) == 0) { if ((chunkidx & (nicluster - 1)) == 0) {
agbno = XFS_AGINO_TO_AGBNO(mp, agbno = XFS_AGINO_TO_AGBNO(mp,
be32_to_cpu(irbp->ir_startino)) + irbp->ir_startino) +
((chunkidx & nimask) >> ((chunkidx & nimask) >>
mp->m_sb.sb_inopblog); mp->m_sb.sb_inopblog);
...@@ -605,13 +599,13 @@ xfs_bulkstat( ...@@ -605,13 +599,13 @@ xfs_bulkstat(
/* /*
* Skip if this inode is free. * Skip if this inode is free.
*/ */
if (XFS_INOBT_MASK(chunkidx) & be64_to_cpu(irbp->ir_free)) if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
continue; continue;
/* /*
* Count used inodes as free so we can tell * Count used inodes as free so we can tell
* when the chunk is used up. * when the chunk is used up.
*/ */
be32_add(&irbp->ir_freecount, 1); irbp->ir_freecount++;
ino = XFS_AGINO_TO_INO(mp, agno, agino); ino = XFS_AGINO_TO_INO(mp, agno, agino);
bno = XFS_AGB_TO_DADDR(mp, agno, agbno); bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
if (!xfs_bulkstat_use_dinode(mp, flags, bp, if (!xfs_bulkstat_use_dinode(mp, flags, bp,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment