Commit 794ee1ba authored by Jes Sorensen, committed by Ingo Molnar

[PATCH] mutex subsystem, semaphore to mutex: XFS

This patch switches XFS over to use the new mutex code directly as
opposed to the previous workaround patch I posted earlier that avoided
the namespace clash by forcing it back to semaphores. This falls in the
'works for me<tm>' category.
Signed-off-by: Jes Sorensen <jes@trained-monkey.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de5097c2
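
For context, the conversion means XFS call sites now use the generic kernel mutex API from <linux/mutex.h> directly, dropping the IRIX-style type/name arguments to mutex_init() and the PINOD/PVFS priority argument to mutex_lock() that the old semaphore-backed wrappers accepted. The sketch below is illustrative only and not part of the patch; the lock names and function are made up to show the calling convention.

#include <linux/mutex.h>

/* Hypothetical example locks, not from the patch. */
static DEFINE_MUTEX(example_lock);		/* compile-time initialization */
static struct mutex example_dynamic_lock;	/* runtime initialization */

static void example_usage(void)
{
	mutex_init(&example_dynamic_lock);	/* no MUTEX_DEFAULT/name arguments */

	mutex_lock(&example_lock);		/* no PINOD/PVFS priority argument */
	/* ... critical section ... */
	mutex_unlock(&example_lock);

	if (mutex_trylock(&example_lock)) {	/* returns 1 on success, 0 if contended */
		mutex_unlock(&example_lock);
	}
}
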
@@ -19,7 +19,7 @@
 #define __XFS_SUPPORT_MUTEX_H__
 
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 /*
  * Map the mutex'es from IRIX to Linux semaphores.
@@ -28,12 +28,8 @@
  * callers.
  */
 #define MUTEX_DEFAULT		0x0
-typedef struct semaphore	mutex_t;
 
-#define mutex_init(lock, type, name)	sema_init(lock, 1)
-#define mutex_destroy(lock)		sema_init(lock, -99)
-#define mutex_lock(lock, num)		down(lock)
-#define mutex_trylock(lock)		(down_trylock(lock) ? 0 : 1)
-#define mutex_unlock(lock)		up(lock)
+typedef struct mutex		mutex_t;
+//#define mutex_destroy(lock)		do{}while(0)
 
 #endif /* __XFS_SUPPORT_MUTEX_H__ */
...
@@ -104,7 +104,7 @@ xfs_qm_dqinit(
 	 */
 	if (brandnewdquot) {
 		dqp->dq_flnext = dqp->dq_flprev = dqp;
-		mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq");
+		mutex_init(&dqp->q_qlock);
 		initnsema(&dqp->q_flock, 1, "fdq");
 		sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
@@ -1382,7 +1382,7 @@ void
 xfs_dqlock(
 	xfs_dquot_t		*dqp)
 {
-	mutex_lock(&(dqp->q_qlock), PINOD);
+	mutex_lock(&(dqp->q_qlock));
 }
 
 void
...
@@ -167,7 +167,7 @@ xfs_Gqm_init(void)
 	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
 	xqm->qm_nrefs = 0;
 #ifdef DEBUG
-	mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
+	xfs_mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
 #endif
 	return xqm;
 }
@@ -1166,7 +1166,7 @@ xfs_qm_init_quotainfo(
 	qinf->qi_dqreclaims = 0;
 
 	/* mutex used to serialize quotaoffs */
-	mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");
+	mutex_init(&qinf->qi_quotaofflock);
 
 	/* Precalc some constants */
 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -1285,7 +1285,7 @@ xfs_qm_list_init(
 	char		*str,
 	int		n)
 {
-	mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
+	mutex_init(&list->qh_lock);
 	list->qh_next = NULL;
 	list->qh_version = 0;
 	list->qh_nelems = 0;
@@ -2762,7 +2762,7 @@ STATIC void
 xfs_qm_freelist_init(xfs_frlist_t *ql)
 {
 	ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
-	mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf");
+	mutex_init(&ql->qh_lock);
 	ql->qh_version = 0;
 	ql->qh_nelems = 0;
 }
@@ -2772,7 +2772,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
 {
 	xfs_dquot_t	*dqp, *nextdqp;
 
-	mutex_lock(&ql->qh_lock, PINOD);
+	mutex_lock(&ql->qh_lock);
 	for (dqp = ql->qh_next;
 	     dqp != (xfs_dquot_t *)ql; ) {
 		xfs_dqlock(dqp);
...
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct {
 #define XFS_QM_IWARNLIMIT	5
 #define XFS_QM_RTBWARNLIMIT	5
 
-#define XFS_QM_LOCK(xqm)	(mutex_lock(&xqm##_lock, PINOD))
+#define XFS_QM_LOCK(xqm)	(mutex_lock(&xqm##_lock))
 #define XFS_QM_UNLOCK(xqm)	(mutex_unlock(&xqm##_lock))
 #define XFS_QM_HOLD(xqm)	((xqm)->qm_nrefs++)
 #define XFS_QM_RELE(xqm)	((xqm)->qm_nrefs--)
...
@@ -363,7 +363,7 @@ xfs_qm_init(void)
 		KERN_INFO "SGI XFS Quota Management subsystem\n";
 
 	printk(message);
-	mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
+	mutex_init(&xfs_Gqm_lock);
 	vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs);
 	xfs_qm_init_procfs();
 }
...
@@ -233,7 +233,7 @@ xfs_qm_scall_quotaoff(
 	 */
 	ASSERT(mp->m_quotainfo);
 	if (mp->m_quotainfo)
-		mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+		mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
 
 	ASSERT(mp->m_quotainfo);
@@ -508,7 +508,7 @@ xfs_qm_scall_quotaon(
 	/*
 	 * Switch on quota enforcement in core.
 	 */
-	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
 	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
 	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
@@ -617,7 +617,7 @@ xfs_qm_scall_setqlim(
 	 * a quotaoff from happening). (XXXThis doesn't currently happen
 	 * because we take the vfslock before calling xfs_qm_sysent).
 	 */
-	mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
 
 	/*
 	 * Get the dquot (locked), and join it to the transaction.
@@ -1426,7 +1426,7 @@ xfs_qm_internalqcheck(
 	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
 	XFS_bflush(mp->m_ddev_targp);
 
-	mutex_lock(&qcheck_lock, PINOD);
+	mutex_lock(&qcheck_lock);
 	/* There should be absolutely no quota activity while this
 	   is going on. */
 	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
...
@@ -51,7 +51,7 @@
 #define XFS_QI_MPLNEXT(mp)	((mp)->m_quotainfo->qi_dqlist.qh_next)
 #define XFS_QI_MPLNDQUOTS(mp)	((mp)->m_quotainfo->qi_dqlist.qh_nelems)
 
-#define XQMLCK(h)		(mutex_lock(&((h)->qh_lock), PINOD))
+#define XQMLCK(h)		(mutex_lock(&((h)->qh_lock)))
 #define XQMUNLCK(h)		(mutex_unlock(&((h)->qh_lock)))
 
 #ifdef DEBUG
 struct xfs_dqhash;
...
@@ -24,7 +24,7 @@ static uuid_t *uuid_table;
 void
 uuid_init(void)
 {
-	mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor");
+	mutex_init(&uuid_monitor);
 }
 
 /*
@@ -94,7 +94,7 @@ uuid_table_insert(uuid_t *uuid)
 {
 	int	i, hole;
 
-	mutex_lock(&uuid_monitor, PVFS);
+	mutex_lock(&uuid_monitor);
 	for (i = 0, hole = -1; i < uuid_table_size; i++) {
 		if (uuid_is_nil(&uuid_table[i])) {
 			hole = i;
@@ -122,7 +122,7 @@ uuid_table_remove(uuid_t *uuid)
 {
 	int	i;
 
-	mutex_lock(&uuid_monitor, PVFS);
+	mutex_lock(&uuid_monitor);
 	for (i = 0; i < uuid_table_size; i++) {
 		if (uuid_is_nil(&uuid_table[i]))
 			continue;
...
@@ -117,7 +117,7 @@ xfs_mount_init(void)
 	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
 	spinlock_init(&mp->m_sb_lock, "xfs_sb");
-	mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");
+	mutex_init(&mp->m_ilock);
 	initnsema(&mp->m_growlock, 1, "xfs_grow");
 	/*
 	 * Initialize the AIL.
...
@@ -533,7 +533,7 @@ typedef struct xfs_mod_sb {
 	int		msb_delta;	/* Change to make to specified field */
 } xfs_mod_sb_t;
 
-#define	XFS_MOUNT_ILOCK(mp)	mutex_lock(&((mp)->m_ilock), PINOD)
+#define	XFS_MOUNT_ILOCK(mp)	mutex_lock(&((mp)->m_ilock))
 #define	XFS_MOUNT_IUNLOCK(mp)	mutex_unlock(&((mp)->m_ilock))
 #define	XFS_SB_LOCK(mp)		mutex_spinlock(&(mp)->m_sb_lock)
 #define	XFS_SB_UNLOCK(mp,s)	mutex_spinunlock(&(mp)->m_sb_lock,(s))