Commit 6441e549 authored by David Chinner, committed by Lachlan McIlroy

[XFS] factor xfs_iget_core() into hit and miss cases

There are really two cases in xfs_iget_core(). The first is the cache hit
case, the second is the miss case. They share very little code, and hence
can easily be factored out into separate functions. This makes the code
much easier to understand and subsequently modify.

SGI-PV: 988141

SGI-Modid: xfs-linux-melb:xfs-kern:32317a
Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
parent 3471394b
...@@ -40,161 +40,119 @@ ...@@ -40,161 +40,119 @@
#include "xfs_utils.h" #include "xfs_utils.h"
/* /*
* Look up an inode by number in the given file system. * Check the validity of the inode we just found in the cache
* The inode is looked up in the cache held in each AG.
* If the inode is found in the cache, attach it to the provided
* vnode.
*
* If it is not in core, read it in from the file system's device,
* add it to the cache and attach the provided vnode.
*
* The inode is locked according to the value of the lock_flags parameter.
* This flag parameter indicates how and if the inode's IO lock and inode lock
* should be taken.
*
* mp -- the mount point structure for the current file system. It points
* to the inode hash table.
* tp -- a pointer to the current transaction if there is one. This is
* simply passed through to the xfs_iread() call.
* ino -- the number of the inode desired. This is the unique identifier
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
* bno -- the block number starting the buffer containing the inode,
* if known (as by bulkstat), else 0.
*/ */
STATIC int static int
xfs_iget_core( xfs_iget_cache_hit(
struct inode *inode, struct inode *inode,
xfs_mount_t *mp, struct xfs_perag *pag,
xfs_trans_t *tp, struct xfs_inode *ip,
xfs_ino_t ino, int flags,
uint flags, int lock_flags) __releases(pag->pag_ici_lock)
uint lock_flags,
xfs_inode_t **ipp,
xfs_daddr_t bno)
{ {
struct inode *old_inode; struct xfs_mount *mp = ip->i_mount;
xfs_inode_t *ip; struct inode *old_inode;
int error; int error = 0;
unsigned long first_index, mask;
xfs_perag_t *pag;
xfs_agino_t agino;
/* the radix tree exists only in inode capable AGs */ /*
if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi) * If INEW is set this inode is being set up
return EINVAL; * Pause and try again.
*/
/* get the perag structure and ensure that it's inode capable */ if (xfs_iflags_test(ip, XFS_INEW)) {
pag = xfs_get_perag(mp, ino); error = EAGAIN;
if (!pag->pagi_inodeok) XFS_STATS_INC(xs_ig_frecycle);
return EINVAL; goto out_error;
ASSERT(pag->pag_ici_init); }
agino = XFS_INO_TO_AGINO(mp, ino);
again:
read_lock(&pag->pag_ici_lock);
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
if (ip != NULL) { old_inode = ip->i_vnode;
if (old_inode == NULL) {
/* /*
* If INEW is set this inode is being set up * If IRECLAIM is set this inode is
* on its way out of the system,
* we need to pause and try again. * we need to pause and try again.
*/ */
if (xfs_iflags_test(ip, XFS_INEW)) { if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
read_unlock(&pag->pag_ici_lock); error = EAGAIN;
delay(1);
XFS_STATS_INC(xs_ig_frecycle); XFS_STATS_INC(xs_ig_frecycle);
goto out_error;
}
ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));
goto again; /*
* If lookup is racing with unlink, then we
* should return an error immediately so we
* don't remove it from the reclaim list and
* potentially leak the inode.
*/
if ((ip->i_d.di_mode == 0) &&
!(flags & XFS_IGET_CREATE)) {
error = ENOENT;
goto out_error;
} }
xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
old_inode = ip->i_vnode; xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
if (old_inode == NULL) { read_unlock(&pag->pag_ici_lock);
/*
* If IRECLAIM is set this inode is XFS_MOUNT_ILOCK(mp);
* on its way out of the system, list_del_init(&ip->i_reclaim);
* we need to pause and try again. XFS_MOUNT_IUNLOCK(mp);
*/
if (xfs_iflags_test(ip, XFS_IRECLAIM)) { } else if (inode != old_inode) {
read_unlock(&pag->pag_ici_lock); /* The inode is being torn down, pause and
delay(1); * try again.
XFS_STATS_INC(xs_ig_frecycle); */
if (old_inode->i_state & (I_FREEING | I_CLEAR)) {
goto again; error = EAGAIN;
} XFS_STATS_INC(xs_ig_frecycle);
ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); goto out_error;
}
/*
* If lookup is racing with unlink, then we
* should return an error immediately so we
* don't remove it from the reclaim list and
* potentially leak the inode.
*/
if ((ip->i_d.di_mode == 0) &&
!(flags & XFS_IGET_CREATE)) {
read_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
return ENOENT;
}
xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
XFS_STATS_INC(xs_ig_found);
xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
read_unlock(&pag->pag_ici_lock);
XFS_MOUNT_ILOCK(mp);
list_del_init(&ip->i_reclaim);
XFS_MOUNT_IUNLOCK(mp);
goto finish_inode;
} else if (inode != old_inode) {
/* The inode is being torn down, pause and
* try again.
*/
if (old_inode->i_state & (I_FREEING | I_CLEAR)) {
read_unlock(&pag->pag_ici_lock);
delay(1);
XFS_STATS_INC(xs_ig_frecycle);
goto again;
}
/* Chances are the other vnode (the one in the inode) is being torn /* Chances are the other vnode (the one in the inode) is being torn
* down right now, and we landed on top of it. Question is, what do * down right now, and we landed on top of it. Question is, what do
* we do? Unhook the old inode and hook up the new one? * we do? Unhook the old inode and hook up the new one?
*/ */
cmn_err(CE_PANIC, cmn_err(CE_PANIC,
"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
old_inode, inode); old_inode, inode);
} } else {
/*
* Inode cache hit
*/
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_found); }
finish_inode: if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { error = ENOENT;
xfs_put_perag(mp, pag); goto out;
return ENOENT; }
}
if (lock_flags != 0) if (lock_flags != 0)
xfs_ilock(ip, lock_flags); xfs_ilock(ip, lock_flags);
xfs_iflags_clear(ip, XFS_ISTALE); xfs_iflags_clear(ip, XFS_ISTALE);
xfs_itrace_exit_tag(ip, "xfs_iget.found"); xfs_itrace_exit_tag(ip, "xfs_iget.found");
goto return_ip; XFS_STATS_INC(xs_ig_found);
} return 0;
/* out_error:
* Inode cache miss
*/
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_missed); out:
return error;
}
static int
xfs_iget_cache_miss(
struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_trans_t *tp,
xfs_ino_t ino,
struct xfs_inode **ipp,
xfs_daddr_t bno,
int flags,
int lock_flags) __releases(pag->pag_ici_lock)
{
struct xfs_inode *ip;
int error;
unsigned long first_index, mask;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
/* /*
* Read the disk inode attributes into a new inode structure and get * Read the disk inode attributes into a new inode structure and get
...@@ -202,17 +160,14 @@ finish_inode: ...@@ -202,17 +160,14 @@ finish_inode:
*/ */
error = xfs_iread(mp, tp, ino, &ip, bno, error = xfs_iread(mp, tp, ino, &ip, bno,
(flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0); (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
if (error) { if (error)
xfs_put_perag(mp, pag);
return error; return error;
}
xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
xfs_idestroy(ip); error = ENOENT;
xfs_put_perag(mp, pag); goto out_destroy;
return ENOENT;
} }
/* /*
...@@ -220,9 +175,8 @@ finish_inode: ...@@ -220,9 +175,8 @@ finish_inode:
* write spinlock. * write spinlock.
*/ */
if (radix_tree_preload(GFP_KERNEL)) { if (radix_tree_preload(GFP_KERNEL)) {
xfs_idestroy(ip); error = EAGAIN;
delay(1); goto out_destroy;
goto again;
} }
if (lock_flags) if (lock_flags)
...@@ -231,32 +185,104 @@ finish_inode: ...@@ -231,32 +185,104 @@ finish_inode:
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = agino & mask; first_index = agino & mask;
write_lock(&pag->pag_ici_lock); write_lock(&pag->pag_ici_lock);
/*
* insert the new inode /* insert the new inode */
*/
error = radix_tree_insert(&pag->pag_ici_root, agino, ip); error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
if (unlikely(error)) { if (unlikely(error)) {
BUG_ON(error != -EEXIST); WARN_ON(error != -EEXIST);
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
if (lock_flags)
xfs_iunlock(ip, lock_flags);
xfs_idestroy(ip);
XFS_STATS_INC(xs_ig_dup); XFS_STATS_INC(xs_ig_dup);
goto again; error = EAGAIN;
goto out_unlock;
} }
/* /* These values _must_ be set before releasing the radix tree lock! */
* These values _must_ be set before releasing the radix tree lock!
*/
ip->i_udquot = ip->i_gdquot = NULL; ip->i_udquot = ip->i_gdquot = NULL;
xfs_iflags_set(ip, XFS_INEW); xfs_iflags_set(ip, XFS_INEW);
write_unlock(&pag->pag_ici_lock); write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end(); radix_tree_preload_end();
*ipp = ip;
return 0;
out_unlock:
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
out_destroy:
xfs_idestroy(ip);
return error;
}
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the cache held in each AG.
* If the inode is found in the cache, attach it to the provided
* vnode.
*
* If it is not in core, read it in from the file system's device,
* add it to the cache and attach the provided vnode.
*
* The inode is locked according to the value of the lock_flags parameter.
* This flag parameter indicates how and if the inode's IO lock and inode lock
* should be taken.
*
* mp -- the mount point structure for the current file system. It points
* to the inode hash table.
* tp -- a pointer to the current transaction if there is one. This is
* simply passed through to the xfs_iread() call.
* ino -- the number of the inode desired. This is the unique identifier
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
* bno -- the block number starting the buffer containing the inode,
* if known (as by bulkstat), else 0.
*/
STATIC int
xfs_iget_core(
struct inode *inode,
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
uint flags,
uint lock_flags,
xfs_inode_t **ipp,
xfs_daddr_t bno)
{
xfs_inode_t *ip;
int error;
xfs_perag_t *pag;
xfs_agino_t agino;
/* the radix tree exists only in inode capable AGs */
if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
pag = xfs_get_perag(mp, ino);
if (!pag->pagi_inodeok)
return EINVAL;
ASSERT(pag->pag_ici_init);
agino = XFS_INO_TO_AGINO(mp, ino);
again:
error = 0;
read_lock(&pag->pag_ici_lock);
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
if (ip) {
error = xfs_iget_cache_hit(inode, pag, ip, flags, lock_flags);
if (error)
goto out_error_or_again;
} else {
read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_missed);
error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
flags, lock_flags);
if (error)
goto out_error_or_again;
}
xfs_put_perag(mp, pag); xfs_put_perag(mp, pag);
return_ip:
ASSERT(ip->i_df.if_ext_max == ASSERT(ip->i_df.if_ext_max ==
XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
...@@ -276,6 +302,14 @@ finish_inode: ...@@ -276,6 +302,14 @@ finish_inode:
if (ip->i_d.di_mode != 0) if (ip->i_d.di_mode != 0)
xfs_setup_inode(ip); xfs_setup_inode(ip);
return 0; return 0;
out_error_or_again:
if (error == EAGAIN) {
delay(1);
goto again;
}
xfs_put_perag(mp, pag);
return error;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment