Commit fd8fbfc1 authored by Dmitry Monakhov, committed by Jan Kara

quota: decouple fs reserved space from quota reservation

Currently inode_reservation is managed by the fs itself and this
reservation is transferred on dquot_transfer(). This means that
inode_reservation must always be in sync with
dquot->dq_dqb.dqb_rsvspace; otherwise dquot_transfer() will result
in incorrect quota (the WARN_ON in dquot_claim_reserved_space()
will be triggered).
This is not easy because of complex lock ordering issues, see for
example http://bugzilla.kernel.org/show_bug.cgi?id=14739

The patch introduces a quota reservation field for each fs-inode
(the fs-specific inode is used in order to avoid bloating the
generic VFS inode). This reservation is managed by the quota code
internally, similarly to i_blocks/i_bytes, and may not always be
in sync with the internal fs reservation.
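
For illustration only (not part of this patch): a filesystem that opts into
the new interface embeds the reservation counter in its own in-memory inode
and hands the quota code a pointer to it through the new get_reserved_space
callback. A minimal sketch follows; the foo_* names are hypothetical, only
the callback signature comes from this patch.

#include <linux/fs.h>
#include <linux/quota.h>

/* Hypothetical fs-private inode carrying the quota-managed reservation. */
struct foo_inode_info {
	qsize_t i_reserved_quota;	/* updated only by the quota code */
	struct inode vfs_inode;
};

static inline struct foo_inode_info *FOO_I(struct inode *inode)
{
	return container_of(inode, struct foo_inode_info, vfs_inode);
}

/* Return a pointer so the quota code can update the value under i_lock. */
static qsize_t *foo_get_reserved_space(struct inode *inode)
{
	return &FOO_I(inode)->i_reserved_quota;
}

static struct dquot_operations foo_quota_operations = {
	/* ... other callbacks (alloc_space, claim_space, release_rsv, ...) ... */
	.get_reserved_space	= foo_get_reserved_space,
};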

Also perform some code rearrangement:
- Unify dquot_reserve_space() and dquot_alloc_space()
- Unify dquot_release_reserved_space() and dquot_free_space()
- Add the missing warning update to release_rsv():
  dquot_release_reserved_space() must call flush_warnings() just as
  dquot_free_space() does.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Jan Kara <jack@suse.cz>
parent b462707e
@@ -1318,6 +1318,67 @@ void vfs_dq_drop(struct inode *inode)
}
EXPORT_SYMBOL(vfs_dq_drop);
/*
* inode_reserved_space is managed internally by quota, and protected by
* i_lock similar to i_blocks+i_bytes.
*/
static qsize_t *inode_reserved_space(struct inode * inode)
{
/* Filesystem must explicitly define its own method in order to use
* quota reservation interface */
BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
return inode->i_sb->dq_op->get_reserved_space(inode);
}
static void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
spin_unlock(&inode->i_lock);
}
static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
spin_unlock(&inode->i_lock);
}
static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
spin_unlock(&inode->i_lock);
}
static qsize_t inode_get_rsv_space(struct inode *inode)
{
qsize_t ret;
spin_lock(&inode->i_lock);
ret = *inode_reserved_space(inode);
spin_unlock(&inode->i_lock);
return ret;
}
static void inode_incr_space(struct inode *inode, qsize_t number,
int reserve)
{
if (reserve)
inode_add_rsv_space(inode, number);
else
inode_add_bytes(inode, number);
}
static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
if (reserve)
inode_sub_rsv_space(inode, number);
else
inode_sub_bytes(inode, number);
}
/*
* Following four functions update i_blocks+i_bytes fields and
* quota information (together with appropriate checks)
@@ -1336,6 +1397,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
/*
* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex
*/
if (IS_NOQUOTA(inode)) {
inode_incr_space(inode, number, reserve);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
inode_incr_space(inode, number, reserve);
goto out_unlock;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1346,7 +1422,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
== NO_QUOTA) {
ret = NO_QUOTA;
goto out_unlock;
spin_unlock(&dq_data_lock);
goto out_flush_warn;
}
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1357,64 +1434,32 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
else
dquot_incr_space(inode->i_dquot[cnt], number);
}
if (!reserve)
inode_add_bytes(inode, number);
out_unlock:
inode_incr_space(inode, number, reserve);
spin_unlock(&dq_data_lock);
flush_warnings(inode->i_dquot, warntype);
return ret;
}
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
int cnt, ret = QUOTA_OK;
/*
* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex
*/
if (IS_NOQUOTA(inode)) {
inode_add_bytes(inode, number);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
inode_add_bytes(inode, number);
goto out_unlock;
}
ret = __dquot_alloc_space(inode, number, warn, 0);
if (ret == NO_QUOTA)
goto out_unlock;
if (reserve)
goto out_flush_warn;
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (inode->i_dquot[cnt])
mark_dquot_dirty(inode->i_dquot[cnt]);
out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
}
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
return __dquot_alloc_space(inode, number, warn, 0);
}
EXPORT_SYMBOL(dquot_alloc_space);
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
int ret = QUOTA_OK;
if (IS_NOQUOTA(inode))
goto out;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode))
goto out_unlock;
ret = __dquot_alloc_space(inode, number, warn, 1);
out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
return __dquot_alloc_space(inode, number, warn, 1);
}
EXPORT_SYMBOL(dquot_reserve_space);
@@ -1471,14 +1516,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
int ret = QUOTA_OK;
if (IS_NOQUOTA(inode)) {
inode_add_bytes(inode, number);
inode_claim_rsv_space(inode, number);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
inode_add_bytes(inode, number);
inode_claim_rsv_space(inode, number);
goto out;
}
@@ -1490,7 +1535,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
number);
}
/* Update inode bytes */
inode_add_bytes(inode, number);
inode_claim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1502,39 +1547,10 @@ out:
}
EXPORT_SYMBOL(dquot_claim_space);
/*
* Release reserved quota space
*/
void dquot_release_reserved_space(struct inode *inode, qsize_t number)
{
int cnt;
if (IS_NOQUOTA(inode))
goto out;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode))
goto out_unlock;
spin_lock(&dq_data_lock);
/* Release reserved dquots */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt])
dquot_free_reserved_space(inode->i_dquot[cnt], number);
}
spin_unlock(&dq_data_lock);
out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return;
}
EXPORT_SYMBOL(dquot_release_reserved_space);
/*
* This operation can block, but only after everything is updated
*/
int dquot_free_space(struct inode *inode, qsize_t number)
int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
@@ -1543,7 +1559,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
inode_sub_bytes(inode, number);
inode_decr_space(inode, number, reserve);
return QUOTA_OK;
}
@@ -1558,20 +1574,42 @@ out_sub:
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
dquot_decr_space(inode->i_dquot[cnt], number);
if (reserve)
dquot_free_reserved_space(inode->i_dquot[cnt], number);
else
dquot_decr_space(inode->i_dquot[cnt], number);
}
inode_sub_bytes(inode, number);
inode_decr_space(inode, number, reserve);
spin_unlock(&dq_data_lock);
if (reserve)
goto out_unlock;
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (inode->i_dquot[cnt])
mark_dquot_dirty(inode->i_dquot[cnt]);
out_unlock:
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
int dquot_free_space(struct inode *inode, qsize_t number)
{
return __dquot_free_space(inode, number, 0);
}
EXPORT_SYMBOL(dquot_free_space);
/*
* Release reserved quota space
*/
void dquot_release_reserved_space(struct inode *inode, qsize_t number)
{
__dquot_free_space(inode, number, 1);
}
EXPORT_SYMBOL(dquot_release_reserved_space);
/*
* This operation can block, but only after everything is updated
*/
@@ -1609,19 +1647,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
}
EXPORT_SYMBOL(dquot_free_inode);
/*
* call back function, get reserved quota space from underlying fs
*/
qsize_t dquot_get_reserved_space(struct inode *inode)
{
qsize_t reserved_space = 0;
if (sb_any_quota_active(inode->i_sb) &&
inode->i_sb->dq_op->get_reserved_space)
reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
return reserved_space;
}
/*
* Transfer the number of inode and blocks from one diskquota to an other.
*
@@ -1665,7 +1690,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
}
spin_lock(&dq_data_lock);
cur_space = inode_get_bytes(inode);
rsv_space = dquot_get_reserved_space(inode);
rsv_space = inode_get_rsv_space(inode);
space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -315,8 +315,9 @@ struct dquot_operations {
int (*claim_space) (struct inode *, qsize_t);
/* release rsved quota for delayed alloc */
void (*release_rsv) (struct inode *, qsize_t);
/* get reserved quota for delayed alloc */
qsize_t (*get_reserved_space) (struct inode *);
/* get reserved quota for delayed alloc, value returned is managed by
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
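
For context, a rough sketch of the caller-side lifecycle these interfaces are
meant to support under delayed allocation: reserve quota when the write is
accepted, claim it when blocks are really allocated, release it if the
reservation is dropped. The foo_* wrappers and the block-to-bytes shift are
illustrative; real filesystems normally reach these routines through their
dquot_operations callbacks rather than calling them directly.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Write time: reserve quota for blocks that are not allocated yet. */
static int foo_reserve_blocks(struct inode *inode, int nblocks)
{
	qsize_t bytes = (qsize_t)nblocks << inode->i_blkbits;

	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;
	return 0;
}

/* Block allocation: convert the reservation into a real allocation. */
static void foo_claim_blocks(struct inode *inode, int nblocks)
{
	dquot_claim_space(inode, (qsize_t)nblocks << inode->i_blkbits);
}

/* Reservation dropped (delayed write discarded): give the quota back. */
static void foo_release_blocks(struct inode *inode, int nblocks)
{
	dquot_release_reserved_space(inode, (qsize_t)nblocks << inode->i_blkbits);
}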