Commit 1c1afa3c authored by Linus Torvalds's avatar Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* master.kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (73 commits)
  [DLM] Clean up lowcomms
  [GFS2] Change gfs2_fsync() to use write_inode_now()
  [GFS2] Fix indent in recovery.c
  [GFS2] Don't flush everything on fdatasync
  [GFS2] Add a comment about reading the super block
  [GFS2] Mount problem with the GFS2 code
  [GFS2] Remove gfs2_check_acl()
  [DLM] fix format warnings in rcom.c and recoverd.c
  [GFS2] lock function parameter
  [DLM] don't accept replies to old recovery messages
  [DLM] fix size of STATUS_REPLY message
  [GFS2] fs/gfs2/log.c:log_bmap() fix printk format warning
  [DLM] fix add_requestqueue checking nodes list
  [GFS2] Fix recursive locking in gfs2_getattr
  [GFS2] Fix recursive locking in gfs2_permission
  [GFS2] Reduce number of arguments to meta_io.c:getbuf()
  [GFS2] Move gfs2_meta_syncfs() into log.c
  [GFS2] Fix journal flush problem
  [GFS2] mark_inode_dirty after write to stuffed file
  [GFS2] Fix glock ordering on inode creation
  ...
parents 0a01707b ac33d071
menu "Distributed Lock Manager"
depends on INET && IP_SCTP && EXPERIMENTAL
depends on EXPERIMENTAL && INET
config DLM
tristate "Distributed Lock Manager (DLM)"
depends on IPV6 || IPV6=n
select CONFIGFS_FS
select IP_SCTP if DLM_SCTP
help
A general purpose distributed lock manager for kernel or userspace
applications.
choice
prompt "Select DLM communications protocol"
depends on DLM
default DLM_TCP
help
The DLM can use TCP or SCTP for its network communications.
SCTP supports multi-homed operations whereas TCP doesn't.
However, SCTP seems to have stability problems at the moment.
config DLM_TCP
bool "TCP/IP"
config DLM_SCTP
bool "SCTP"
endchoice
config DLM_DEBUG
bool "DLM debugging"
depends on DLM
......
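For reference, a .config fragment selecting the options above might look like this (a sketch only; DLM built as a module with the TCP transport and debugging enabled — IP_SCTP is only pulled in when DLM_SCTP is chosen):

CONFIG_DLM=m
CONFIG_DLM_TCP=y
# CONFIG_DLM_SCTP is not set
CONFIG_DLM_DEBUG=y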
......@@ -4,7 +4,6 @@ dlm-y := ast.o \
dir.o \
lock.o \
lockspace.o \
lowcomms.o \
main.o \
member.o \
memory.o \
......@@ -17,3 +16,6 @@ dlm-y := ast.o \
util.o
dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o
dlm-$(CONFIG_DLM_TCP) += lowcomms-tcp.o
dlm-$(CONFIG_DLM_SCTP) += lowcomms-sctp.o
\ No newline at end of file
......@@ -471,6 +471,7 @@ struct dlm_ls {
char *ls_recover_buf;
int ls_recover_nodeid; /* for debugging */
uint64_t ls_rcom_seq;
spinlock_t ls_rcom_spin;
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
int ls_recover_list_count;
......@@ -488,7 +489,8 @@ struct dlm_ls {
#define LSFL_RUNNING 1
#define LSFL_RECOVERY_STOP 2
#define LSFL_RCOM_READY 3
#define LSFL_UEVENT_WAIT 4
#define LSFL_RCOM_WAIT 4
#define LSFL_UEVENT_WAIT 5
/* much of this is just saving user space pointers associated with the
lock that we pass back to the user lib with an ast */
......
......@@ -2372,6 +2372,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
{
lkb->lkb_exflags = ms->m_exflags;
lkb->lkb_sbflags = ms->m_sbflags;
lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
(ms->m_flags & 0x0000FFFF);
}
......@@ -3028,10 +3029,17 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
while (1) {
if (dlm_locking_stopped(ls)) {
if (!recovery)
dlm_add_requestqueue(ls, nodeid, hd);
error = -EINTR;
goto out;
if (recovery) {
error = -EINTR;
goto out;
}
error = dlm_add_requestqueue(ls, nodeid, hd);
if (error == -EAGAIN)
continue;
else {
error = -EINTR;
goto out;
}
}
if (lock_recovery_try(ls))
......
......@@ -22,6 +22,7 @@
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
......@@ -478,6 +479,8 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
ls->ls_recoverd_task = NULL;
mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
spin_lock_init(&ls->ls_rcom_spin);
get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
ls->ls_recover_status = 0;
ls->ls_recover_seq = 0;
ls->ls_recover_args = NULL;
......@@ -684,6 +687,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
* Free structures on any other lists
*/
dlm_purge_requestqueue(ls);
kfree(ls->ls_recover_args);
dlm_clear_free_entries(ls);
dlm_clear_members(ls);
......
......@@ -14,8 +14,6 @@
#ifndef __LOWCOMMS_DOT_H__
#define __LOWCOMMS_DOT_H__
int dlm_lowcomms_init(void);
void dlm_lowcomms_exit(void);
int dlm_lowcomms_start(void);
void dlm_lowcomms_stop(void);
int dlm_lowcomms_close(int nodeid);
......
......@@ -16,7 +16,6 @@
#include "lock.h"
#include "user.h"
#include "memory.h"
#include "lowcomms.h"
#include "config.h"
#ifdef CONFIG_DLM_DEBUG
......@@ -47,20 +46,14 @@ static int __init init_dlm(void)
if (error)
goto out_config;
error = dlm_lowcomms_init();
if (error)
goto out_debug;
error = dlm_user_init();
if (error)
goto out_lowcomms;
goto out_debug;
printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
return 0;
out_lowcomms:
dlm_lowcomms_exit();
out_debug:
dlm_unregister_debugfs();
out_config:
......@@ -76,7 +69,6 @@ static int __init init_dlm(void)
static void __exit exit_dlm(void)
{
dlm_user_exit();
dlm_lowcomms_exit();
dlm_config_exit();
dlm_memory_exit();
dlm_lockspace_exit();
......
......@@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
struct dlm_member *memb, *safe;
int i, error, found, pos = 0, neg = 0, low = -1;
/* previously removed members that we've not finished removing need to
count as a negative change so the "neg" recovery steps will happen */
list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
log_debug(ls, "prev removed member %d", memb->nodeid);
neg++;
}
/* move departed members from ls_nodes to ls_nodes_gone */
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
......
......@@ -90,13 +90,28 @@ static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
return 0;
}
static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
{
spin_lock(&ls->ls_rcom_spin);
*new_seq = ++ls->ls_rcom_seq;
set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
spin_unlock(&ls->ls_rcom_spin);
}
static void disallow_sync_reply(struct dlm_ls *ls)
{
spin_lock(&ls->ls_rcom_spin);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
spin_unlock(&ls->ls_rcom_spin);
}
int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
int error = 0;
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
ls->ls_recover_nodeid = nodeid;
if (nodeid == dlm_our_nodeid()) {
......@@ -108,12 +123,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
if (error)
goto out;
rc->rc_id = ++ls->ls_rcom_seq;
allow_sync_reply(ls, &rc->rc_id);
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
send_rcom(ls, mh, rc);
error = dlm_wait_function(ls, &rcom_response);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
disallow_sync_reply(ls);
if (error)
goto out;
......@@ -150,14 +167,21 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
if (rc_in->rc_id != ls->ls_rcom_seq) {
log_debug(ls, "reject old reply %d got %llx wanted %llx",
rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq);
return;
spin_lock(&ls->ls_rcom_spin);
if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
rc_in->rc_id != ls->ls_rcom_seq) {
log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
rc_in->rc_type, rc_in->rc_header.h_nodeid,
(unsigned long long)rc_in->rc_id,
(unsigned long long)ls->ls_rcom_seq);
goto out;
}
memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
set_bit(LSFL_RCOM_READY, &ls->ls_flags);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
wake_up(&ls->ls_wait_general);
out:
spin_unlock(&ls->ls_rcom_spin);
}
static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
......@@ -171,7 +195,6 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
struct dlm_mhandle *mh;
int error = 0, len = sizeof(struct dlm_rcom);
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
ls->ls_recover_nodeid = nodeid;
if (nodeid == dlm_our_nodeid()) {
......@@ -185,12 +208,14 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
if (error)
goto out;
memcpy(rc->rc_buf, last_name, last_len);
rc->rc_id = ++ls->ls_rcom_seq;
allow_sync_reply(ls, &rc->rc_id);
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
send_rcom(ls, mh, rc);
error = dlm_wait_function(ls, &rcom_response);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
disallow_sync_reply(ls);
out:
return error;
}
......@@ -370,9 +395,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct rcom_config *rf;
struct dlm_mhandle *mh;
char *mb;
int mb_len = sizeof(struct dlm_rcom);
int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
if (!mh)
......@@ -391,6 +417,9 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
rc->rc_id = rc_in->rc_id;
rc->rc_result = -ESRCH;
rf = (struct rcom_config *) rc->rc_buf;
rf->rf_lvblen = -1;
dlm_rcom_out(rc);
dlm_lowcomms_commit_buffer(mh);
......@@ -412,9 +441,10 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
ls = dlm_find_lockspace_global(hd->h_lockspace);
if (!ls) {
log_print("lockspace %x from %d not found",
hd->h_lockspace, nodeid);
send_ls_not_ready(nodeid, rc);
log_print("lockspace %x from %d type %x not found",
hd->h_lockspace, nodeid, rc->rc_type);
if (rc->rc_type == DLM_RCOM_STATUS)
send_ls_not_ready(nodeid, rc);
return;
}
......
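The allow_sync_reply()/disallow_sync_reply() pair above is the core of the "don't accept replies to old recovery messages" fix: a reply is copied into ls_recover_buf only if LSFL_RCOM_WAIT is still set and the reply carries the current ls_rcom_seq, both checked under ls_rcom_spin. A minimal userspace analogue of the pattern (a sketch only, not the kernel API; a pthread mutex stands in for the spinlock and plain ints for the flag bits):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rcom_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t rcom_seq;	/* analogue of ls_rcom_seq */
static int rcom_wait;		/* analogue of LSFL_RCOM_WAIT */

/* called just before sending a request that expects one sync reply */
static uint64_t allow_sync_reply(void)
{
	pthread_mutex_lock(&rcom_lock);
	uint64_t id = ++rcom_seq;	/* stamp the request with a fresh seq */
	rcom_wait = 1;			/* exactly one matching reply is valid now */
	pthread_mutex_unlock(&rcom_lock);
	return id;
}

/* called once the waiter is done (or has timed out) */
static void disallow_sync_reply(void)
{
	pthread_mutex_lock(&rcom_lock);
	rcom_wait = 0;			/* anything arriving later is stale */
	pthread_mutex_unlock(&rcom_lock);
}

/* returns 1 if the reply should be consumed, 0 if it must be dropped */
static int receive_sync_reply(uint64_t reply_id)
{
	int ok;

	pthread_mutex_lock(&rcom_lock);
	ok = rcom_wait && reply_id == rcom_seq;
	if (ok)
		rcom_wait = 0;		/* consume; a duplicate is stale too */
	else
		fprintf(stderr, "reject reply seq %llx expect %llx\n",
			(unsigned long long)reply_id,
			(unsigned long long)rcom_seq);
	pthread_mutex_unlock(&rcom_lock);
	return ok;
}

int main(void)
{
	uint64_t id = allow_sync_reply();
	printf("stale reply accepted?   %d\n", receive_sync_reply(id - 1));
	printf("current reply accepted? %d\n", receive_sync_reply(id));
	disallow_sync_reply();
	return 0;
}

Checking the flag and the sequence number under the same lock is what keeps a reply left over from an earlier recovery pass from overwriting ls_recover_buf while a new request is outstanding.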
......@@ -252,6 +252,7 @@ static void recover_list_clear(struct dlm_ls *ls)
spin_lock(&ls->ls_recover_list_lock);
list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
list_del_init(&r->res_recover_list);
r->res_recover_locks_count = 0;
dlm_put_rsb(r);
ls->ls_recover_list_count--;
}
......
......@@ -45,7 +45,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
unsigned long start;
int error, neg = 0;
log_debug(ls, "recover %llx", rv->seq);
log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
mutex_lock(&ls->ls_recoverd_active);
......@@ -93,14 +93,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
goto fail;
}
/*
* Purge directory-related requests that are saved in requestqueue.
* All dir requests from before recovery are invalid now due to the dir
* rebuild and will be resent by the requesting nodes.
*/
dlm_purge_requestqueue(ls);
/*
* Wait for all nodes to complete directory rebuild.
*/
......@@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
*/
dlm_recover_rsbs(ls);
} else {
/*
* Other lockspace members may be going through the "neg" steps
* while also adding us to the lockspace, in which case they'll
* be doing the recover_locks (RS_LOCKS) barrier.
*/
dlm_set_recover_status(ls, DLM_RS_LOCKS);
error = dlm_recover_locks_wait(ls);
if (error) {
log_error(ls, "recover_locks_wait failed %d", error);
goto fail;
}
}
dlm_release_root_list(ls);
/*
* Purge directory-related requests that are saved in requestqueue.
* All dir requests from before recovery are invalid now due to the dir
* rebuild and will be resent by the requesting nodes.
*/
dlm_purge_requestqueue(ls);
dlm_set_recover_status(ls, DLM_RS_DONE);
error = dlm_recover_done_wait(ls);
if (error) {
......@@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_astd_wake();
log_debug(ls, "recover %llx done: %u ms", rv->seq,
log_debug(ls, "recover %llx done: %u ms",
(unsigned long long)rv->seq,
jiffies_to_msecs(jiffies - start));
mutex_unlock(&ls->ls_recoverd_active);
......@@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
fail:
dlm_release_root_list(ls);
log_debug(ls, "recover %llx error %d", rv->seq, error);
log_debug(ls, "recover %llx error %d",
(unsigned long long)rv->seq, error);
mutex_unlock(&ls->ls_recoverd_active);
return error;
}
/* The dlm_ls_start() that created the rv we take here may already have been
stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
flag set. */
static void do_ls_recovery(struct dlm_ls *ls)
{
struct dlm_recover *rv = NULL;
......@@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls)
spin_lock(&ls->ls_recover_lock);
rv = ls->ls_recover_args;
ls->ls_recover_args = NULL;
clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
if (rv && ls->ls_recover_seq == rv->seq)
clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
spin_unlock(&ls->ls_recover_lock);
if (rv) {
......
......@@ -30,26 +30,36 @@ struct rq_entry {
* lockspace is enabled on some while still suspended on others.
*/
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
{
struct rq_entry *e;
int length = hd->h_length;
if (dlm_is_removed(ls, nodeid))
return;
int rv = 0;
e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
if (!e) {
log_print("dlm_add_requestqueue: out of memory\n");
return;
return 0;
}
e->nodeid = nodeid;
memcpy(e->request, hd, length);
/* We need to check dlm_locking_stopped() after taking the mutex to
avoid a race where dlm_recoverd enables locking and runs
process_requestqueue between our earlier dlm_locking_stopped check
and this addition to the requestqueue. */
mutex_lock(&ls->ls_requestqueue_mutex);
list_add_tail(&e->list, &ls->ls_requestqueue);
if (dlm_locking_stopped(ls))
list_add_tail(&e->list, &ls->ls_requestqueue);
else {
log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
kfree(e);
rv = -EAGAIN;
}
mutex_unlock(&ls->ls_requestqueue_mutex);
return rv;
}
int dlm_process_requestqueue(struct dlm_ls *ls)
......@@ -120,6 +130,10 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
uint32_t type = ms->m_type;
/* the ls is being cleaned up and freed by release_lockspace */
if (!ls->ls_count)
return 1;
if (dlm_is_removed(ls, nodeid))
return 1;
......
......@@ -13,7 +13,7 @@
#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
int dlm_process_requestqueue(struct dlm_ls *ls);
void dlm_wait_requestqueue(struct dlm_ls *ls);
void dlm_purge_requestqueue(struct dlm_ls *ls);
......
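The change of dlm_add_requestqueue() to return int closes a window visible in the dlm_receive_message() hunk earlier in this merge: the caller tests dlm_locking_stopped() without the mutex, so dlm_recoverd can re-enable locking and drain the queue before the entry is added. Re-checking under ls_requestqueue_mutex and returning -EAGAIN sends the caller around its loop again. A userspace sketch of that check/recheck shape (illustrative names, not the kernel API; single-threaded demo, so the unlocked first look is harmless here):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_mutex = PTHREAD_MUTEX_INITIALIZER;
static int locking_stopped = 1;	/* flipped by the "recovery" side */
static int queued;

/* recheck the stopped condition under the same mutex the draining
 * side holds; only then is the enqueue safe */
static int add_requestqueue(int msg)
{
	int rv = 0;

	pthread_mutex_lock(&rq_mutex);
	if (locking_stopped)
		queued++;	/* safe: recovery cannot drain us concurrently */
	else
		rv = -EAGAIN;	/* raced with recovery finishing; retry */
	pthread_mutex_unlock(&rq_mutex);
	return rv;
}

static void receive_message(int msg)
{
	for (;;) {
		if (locking_stopped) {	/* unlocked first look, may be stale */
			if (add_requestqueue(msg) == -EAGAIN)
				continue;	/* locking restarted: process normally */
			return;			/* parked until recovery completes */
		}
		printf("processing message %d\n", msg);
		return;
	}
}

int main(void)
{
	receive_message(1);	/* queued for later */
	locking_stopped = 0;
	receive_message(2);	/* processed directly */
	printf("queued=%d\n", queued);
	return 0;
}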
......@@ -2,6 +2,7 @@ config GFS2_FS
tristate "GFS2 file system support"
depends on EXPERIMENTAL
select FS_POSIX_ACL
select CRC32
help
A cluster filesystem.
......
......@@ -74,11 +74,11 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
{
if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
return -EOPNOTSUPP;
if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER))
if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
return -EPERM;
if (S_ISLNK(ip->i_di.di_mode))
if (S_ISLNK(ip->i_inode.i_mode))
return -EOPNOTSUPP;
if (!access && !S_ISDIR(ip->i_di.di_mode))
if (!access && !S_ISDIR(ip->i_inode.i_mode))
return -EACCES;
return 0;
......@@ -145,14 +145,14 @@ out:
}
/**
* gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something
* gfs2_check_acl - Check an ACL to see if we're allowed to do something
* @inode: the file we want to do something to
* @mask: what we want to do
*
* Returns: errno
*/
int gfs2_check_acl_locked(struct inode *inode, int mask)
int gfs2_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = NULL;
int error;
......@@ -170,21 +170,6 @@ int gfs2_check_acl_locked(struct inode *inode, int mask)
return -EAGAIN;
}
int gfs2_check_acl(struct inode *inode, int mask)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder i_gh;
int error;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (!error) {
error = gfs2_check_acl_locked(inode, mask);
gfs2_glock_dq_uninit(&i_gh);
}
return error;
}
static int munge_mode(struct gfs2_inode *ip, mode_t mode)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
......@@ -198,10 +183,10 @@ static int munge_mode(struct gfs2_inode *ip, mode_t mode)
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_assert_withdraw(sdp,
(ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT));
ip->i_di.di_mode = mode;
(ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
ip->i_inode.i_mode = mode;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -215,12 +200,12 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct posix_acl *acl = NULL, *clone;
struct gfs2_ea_request er;
mode_t mode = ip->i_di.di_mode;
mode_t mode = ip->i_inode.i_mode;
int error;
if (!sdp->sd_args.ar_posix_acl)
return 0;
if (S_ISLNK(ip->i_di.di_mode))
if (S_ISLNK(ip->i_inode.i_mode))
return 0;
memset(&er, 0, sizeof(struct gfs2_ea_request));
......@@ -232,7 +217,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
return error;
if (!acl) {
mode &= ~current->fs->umask;
if (mode != ip->i_di.di_mode)
if (mode != ip->i_inode.i_mode)
error = munge_mode(ip, mode);
return error;
}
......@@ -244,7 +229,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
posix_acl_release(acl);
acl = clone;
if (S_ISDIR(ip->i_di.di_mode)) {
if (S_ISDIR(ip->i_inode.i_mode)) {
er.er_name = GFS2_POSIX_ACL_DEFAULT;
er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
error = gfs2_system_eaops.eo_set(ip, &er);
......
......@@ -31,7 +31,6 @@ int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
struct gfs2_ea_request *er,
int *remove, mode_t *mode);
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
int gfs2_check_acl_locked(struct inode *inode, int mask);
int gfs2_check_acl(struct inode *inode, int mask);
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
......
......@@ -112,6 +112,7 @@ int gfs2_logd(void *data)
struct gfs2_sbd *sdp = data;
struct gfs2_holder ji_gh;
unsigned long t;
int need_flush;
while (!kthread_should_stop()) {
/* Advance the log tail */
......@@ -120,8 +121,10 @@ int gfs2_logd(void *data)
gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
gfs2_ail1_empty(sdp, DIO_ALL);
if (time_after_eq(jiffies, t)) {
gfs2_log_lock(sdp);
need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
gfs2_log_unlock(sdp);
if (need_flush || time_after_eq(jiffies, t)) {
gfs2_log_flush(sdp, NULL);
sdp->sd_log_flush_time = jiffies;
}
......
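With this change gfs2_logd flushes for two reasons: the periodic gt_log_flush_secs timer, or the in-core buffer count exceeding gt_incore_log_blocks, sampled briefly under the log lock. The matching log.c hunk later in this merge makes gfs2_log_commit() wake the daemon instead of flushing synchronously. A sketch of that producer-wakes-daemon shape (userspace analogue only; a condition variable stands in for wake_up_process and the timer path is omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t logd_wake = PTHREAD_COND_INITIALIZER;
static int log_num_buf;
static const int incore_log_blocks = 8;	/* threshold, like the tunable */
static int stop;

static void *logd(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&log_lock);
	while (!stop) {
		/* sample the counter under the lock, like need_flush above */
		if (log_num_buf > incore_log_blocks) {
			int n = log_num_buf;
			log_num_buf = 0;
			pthread_mutex_unlock(&log_lock);
			printf("logd: flushing %d buffers\n", n);	/* gfs2_log_flush() */
			pthread_mutex_lock(&log_lock);
			continue;
		}
		pthread_cond_wait(&logd_wake, &log_lock);
	}
	pthread_mutex_unlock(&log_lock);
	return NULL;
}

/* commit path: account the buffer, wake the daemon — no flush here */
static void log_commit(void)
{
	pthread_mutex_lock(&log_lock);
	log_num_buf++;
	if (log_num_buf > incore_log_blocks)
		pthread_cond_signal(&logd_wake);
	pthread_mutex_unlock(&log_lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, logd, NULL);
	for (int i = 0; i < 20; i++)
		log_commit();
	pthread_mutex_lock(&log_lock);
	stop = 1;
	pthread_cond_signal(&logd_wake);
	pthread_mutex_unlock(&log_lock);
	pthread_join(t, NULL);
	return 0;
}

Moving the flush out of the commit path keeps transaction commit cheap while still bounding how much log data accumulates in core.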
......@@ -31,17 +31,17 @@ struct gfs2_inum;
typedef int (*gfs2_filldir_t) (void *opaque,
const char *name, unsigned int length,
u64 offset,
struct gfs2_inum *inum, unsigned int type);
struct gfs2_inum_host *inum, unsigned int type);
int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
struct gfs2_inum *inum, unsigned int *type);
struct gfs2_inum_host *inum, unsigned int *type);
int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inum *inum, unsigned int type);
const struct gfs2_inum_host *inum, unsigned int type);
int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
gfs2_filldir_t filldir);
int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
struct gfs2_inum *new_inum, unsigned int new_type);
struct gfs2_inum_host *new_inum, unsigned int new_type);
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
......
......@@ -120,7 +120,7 @@ static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
if (!(er->er_flags & GFS2_ERF_MODE)) {
er->er_mode = ip->i_di.di_mode;
er->er_mode = ip->i_inode.i_mode;
er->er_flags |= GFS2_ERF_MODE;
}
error = gfs2_acl_validate_set(ip, 1, er,
......
......@@ -112,7 +112,7 @@ fail:
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
struct buffer_head *bh, *eabh;
u64 *eablk, *end;
__be64 *eablk, *end;
int error;
error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
......@@ -129,7 +129,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
goto out;
}
eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
for (; eablk < end; eablk++) {
......@@ -224,7 +224,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_rgrpd *rgd;
struct gfs2_holder rg_gh;
struct buffer_head *dibh;
u64 *dataptrs, bn = 0;
__be64 *dataptrs;
u64 bn = 0;
u64 bstart = 0;
unsigned int blen = 0;
unsigned int blks = 0;
......@@ -280,6 +281,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
......@@ -299,9 +301,9 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_di.di_ctime = get_seconds();
ip->i_inode.i_ctime.tv_sec = get_seconds();
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -444,7 +446,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
struct buffer_head **bh;
unsigned int amount = GFS2_EA_DATA_LEN(ea);
unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
unsigned int x;
int error = 0;
......@@ -597,6 +599,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
ea->ea_num_ptrs = 0;
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
return 0;
}
......@@ -629,7 +632,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
ea->ea_num_ptrs = 0;
memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
} else {
u64 *dataptr = GFS2_EA2DATAPTRS(ea);
__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
const char *data = er->er_data;
unsigned int data_len = er->er_data_len;
unsigned int copy;
......@@ -648,6 +651,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
data_len;
......@@ -686,7 +690,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
goto out;
error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
if (error)
goto out_gunlock_q;
......@@ -710,13 +714,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (!error) {
if (er->er_flags & GFS2_ERF_MODE) {
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
(ip->i_di.di_mode & S_IFMT) ==
(ip->i_inode.i_mode & S_IFMT) ==
(er->er_mode & S_IFMT));
ip->i_di.di_mode = er->er_mode;
ip->i_inode.i_mode = er->er_mode;
}
ip->i_di.di_ctime = get_seconds();
ip->i_inode.i_ctime.tv_sec = get_seconds();
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -846,12 +850,12 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
if (er->er_flags & GFS2_ERF_MODE) {
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
(ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
ip->i_di.di_mode = er->er_mode;
(ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
ip->i_inode.i_mode = er->er_mode;
}
ip->i_di.di_ctime = get_seconds();
ip->i_inode.i_ctime.tv_sec = get_seconds();
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
out:
gfs2_trans_end(GFS2_SB(&ip->i_inode));
......@@ -931,12 +935,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *indbh, *newbh;
u64 *eablk;
__be64 *eablk;
int error;
int mh_size = sizeof(struct gfs2_meta_header);
if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
u64 *end;
__be64 *end;
error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
&indbh);
......@@ -948,7 +952,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
goto out;
}
eablk = (u64 *)(indbh->b_data + mh_size);
eablk = (__be64 *)(indbh->b_data + mh_size);
end = eablk + sdp->sd_inptrs;
for (; eablk < end; eablk++)
......@@ -971,11 +975,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
gfs2_buffer_clear_tail(indbh, mh_size);
eablk = (u64 *)(indbh->b_data + mh_size);
eablk = (__be64 *)(indbh->b_data + mh_size);
*eablk = cpu_to_be64(ip->i_di.di_eattr);
ip->i_di.di_eattr = blk;
ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
eablk++;
}
......@@ -1129,9 +1134,9 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_di.di_ctime = get_seconds();
ip->i_inode.i_ctime.tv_sec = get_seconds();
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -1202,7 +1207,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
struct buffer_head **bh;
unsigned int amount = GFS2_EA_DATA_LEN(ea);
unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
unsigned int x;
int error;
......@@ -1284,9 +1289,8 @@ int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
if (!error) {
error = inode_setattr(&ip->i_inode, attr);
gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
gfs2_inode_attr_out(ip);
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -1300,7 +1304,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
struct buffer_head *indbh, *dibh;
u64 *eablk, *end;
__be64 *eablk, *end;
unsigned int rg_blocks = 0;
u64 bstart = 0;
unsigned int blen = 0;
......@@ -1319,7 +1323,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
goto out;
}
eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
end = eablk + sdp->sd_inptrs;
for (; eablk < end; eablk++) {
......@@ -1363,7 +1367,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
bstart = 0;
blen = 0;
......@@ -1387,6 +1391,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
......@@ -1396,7 +1401,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......@@ -1441,11 +1446,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(&ip->i_di, dibh->b_data);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
......
......@@ -19,7 +19,7 @@ struct iattr;
#define GFS2_EA_SIZE(ea) \
ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
(sizeof(u64) * (ea)->ea_num_ptrs)), 8)
(sizeof(__be64) * (ea)->ea_num_ptrs)), 8)
#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
......@@ -29,13 +29,13 @@ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
#define GFS2_EA2DATAPTRS(ea) \
((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
#define GFS2_EA2NEXT(ea) \
((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
......
......@@ -96,7 +96,7 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(x)
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
return NULL;
}
......@@ -769,7 +769,7 @@ restart:
} else {
spin_unlock(&gl->gl_spin);
new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
if (!new_gh)
return;
set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
......@@ -785,21 +785,6 @@ out:
gfs2_holder_put(new_gh);
}
void gfs2_glock_inode_squish(struct inode *inode)
{
struct gfs2_holder gh;
struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
set_bit(HIF_DEMOTE, &gh.gh_iflags);
spin_lock(&gl->gl_spin);
gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
list_add_tail(&gh.gh_list, &gl->gl_waiters2);
run_queue(gl);
spin_unlock(&gl->gl_spin);
wait_for_completion(&gh.gh_wait);
gfs2_holder_uninit(&gh);
}
/**
* state_change - record that the glock is now in a different state
* @gl: the glock
......@@ -847,12 +832,12 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
if (glops->go_inval)
glops->go_inval(gl, DIO_METADATA | DIO_DATA);
glops->go_inval(gl, DIO_METADATA);
} else if (gl->gl_state == LM_ST_DEFERRED) {
/* We might not want to do this here.
Look at moving to the inode glops. */
if (glops->go_inval)
glops->go_inval(gl, DIO_DATA);
glops->go_inval(gl, 0);
}
/* Deal with each possible exit condition */
......@@ -954,7 +939,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
gfs2_assert_warn(sdp, state != gl->gl_state);
if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
glops->go_sync(gl);
gfs2_glock_hold(gl);
gl->gl_req_bh = xmote_bh;
......@@ -995,7 +980,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
state_change(gl, LM_ST_UNLOCKED);
if (glops->go_inval)
glops->go_inval(gl, DIO_METADATA | DIO_DATA);
glops->go_inval(gl, DIO_METADATA);
if (gh) {
spin_lock(&gl->gl_spin);
......@@ -1041,7 +1026,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
glops->go_sync(gl);
gfs2_glock_hold(gl);
gl->gl_req_bh = drop_bh;
......@@ -1244,9 +1229,6 @@ restart:
clear_bit(GLF_PREFETCH, &gl->gl_flags);
if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
dump_glock(gl);
return error;
}
......@@ -1923,7 +1905,7 @@ out:
static void scan_glock(struct gfs2_glock *gl)
{
if (gl->gl_ops == &gfs2_inode_glops)
if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
return;
if (gfs2_glmutex_trylock(gl)) {
......@@ -2078,7 +2060,7 @@ static int dump_inode(struct gfs2_inode *ip)
printk(KERN_INFO " num = %llu %llu\n",
(unsigned long long)ip->i_num.no_formal_ino,
(unsigned long long)ip->i_num.no_addr);
printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
printk(KERN_INFO " type = %u\n", IF2DT(ip->i_inode.i_mode));
printk(KERN_INFO " i_flags =");
for (x = 0; x < 32; x++)
if (test_bit(x, &ip->i_flags))
......
......@@ -27,8 +27,6 @@
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400
#define GL_NOCANCEL 0x00001000
#define GL_AOP 0x00004000
#define GL_DUMP 0x00008000
#define GLR_TRYFAILED 13
#define GLR_CANCELED 14
......@@ -108,7 +106,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
unsigned int state, int flags);
void gfs2_glock_inode_squish(struct inode *inode);
/**
* gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
......
......@@ -92,7 +92,7 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
ip = gl->gl_object;
inode = &ip->i_inode;
if (!ip || !S_ISREG(ip->i_di.di_mode))
if (!ip || !S_ISREG(inode->i_mode))
return;
if (!test_bit(GIF_PAGED, &ip->i_flags))
......@@ -106,90 +106,21 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
clear_bit(GIF_SW_PAGED, &ip->i_flags);
}
/**
* gfs2_page_inval - Invalidate all pages associated with a glock
* @gl: the glock
*
*/
static void gfs2_page_inval(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
struct inode *inode;
ip = gl->gl_object;
inode = &ip->i_inode;
if (!ip || !S_ISREG(ip->i_di.di_mode))
return;
truncate_inode_pages(inode->i_mapping, 0);
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
clear_bit(GIF_PAGED, &ip->i_flags);
}
/**
* gfs2_page_wait - Wait for writeback of data
* @gl: the glock
*
* Syncs data (not metadata) for a regular file.
* No-op for all other types.
*/
static void gfs2_page_wait(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
struct inode *inode = &ip->i_inode;
struct address_space *mapping = inode->i_mapping;
int error;
if (!S_ISREG(ip->i_di.di_mode))
return;
error = filemap_fdatawait(mapping);
/* Put back any errors cleared by filemap_fdatawait()
so they can be caught by someone who can pass them
up to user space. */
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else if (error)
set_bit(AS_EIO, &mapping->flags);
}
static void gfs2_page_writeback(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
struct inode *inode = &ip->i_inode;
struct address_space *mapping = inode->i_mapping;
if (!S_ISREG(ip->i_di.di_mode))
return;
filemap_fdatawrite(mapping);
}
/**
* meta_go_sync - sync out the metadata for this glock
* @gl: the glock
* @flags: DIO_*
*
* Called when demoting or unlocking an EX glock. We must flush
* to disk all dirty buffers/pages relating to this glock, and must not
* return to the caller to demote/unlock the glock until I/O is complete.
*/
static void meta_go_sync(struct gfs2_glock *gl, int flags)
static void meta_go_sync(struct gfs2_glock *gl)
{
if (!(flags & DIO_METADATA))
return;
if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
gfs2_log_flush(gl->gl_sbd, gl);
gfs2_meta_sync(gl);
if (flags & DIO_RELEASE)
gfs2_ail_empty_gl(gl);
gfs2_ail_empty_gl(gl);
}
}
......@@ -264,31 +195,31 @@ static void inode_go_drop_th(struct gfs2_glock *gl)
/**
* inode_go_sync - Sync the dirty data and/or metadata for an inode glock
* @gl: the glock protecting the inode
* @flags:
*
*/
static void inode_go_sync(struct gfs2_glock *gl, int flags)
static void inode_go_sync(struct gfs2_glock *gl)
{
int meta = (flags & DIO_METADATA);
int data = (flags & DIO_DATA);
struct gfs2_inode *ip = gl->gl_object;
if (ip && !S_ISREG(ip->i_inode.i_mode))
ip = NULL;
if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
if (meta && data) {
gfs2_page_writeback(gl);
gfs2_log_flush(gl->gl_sbd, gl);
gfs2_meta_sync(gl);
gfs2_page_wait(gl);
clear_bit(GLF_DIRTY, &gl->gl_flags);
} else if (meta) {
gfs2_log_flush(gl->gl_sbd, gl);
gfs2_meta_sync(gl);
} else if (data) {
gfs2_page_writeback(gl);
gfs2_page_wait(gl);
gfs2_log_flush(gl->gl_sbd, gl);
if (ip)
filemap_fdatawrite(ip->i_inode.i_mapping);
gfs2_meta_sync(gl);
if (ip) {
struct address_space *mapping = ip->i_inode.i_mapping;
int error = filemap_fdatawait(mapping);
if (error == -ENOSPC)
set_bit(AS_ENOSPC, &mapping->flags);
else if (error)
set_bit(AS_EIO, &mapping->flags);
}
if (flags & DIO_RELEASE)
gfs2_ail_empty_gl(gl);
clear_bit(GLF_DIRTY, &gl->gl_flags);
gfs2_ail_empty_gl(gl);
}
}
......@@ -301,15 +232,20 @@ static void inode_go_sync(struct gfs2_glock *gl, int flags)
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
struct gfs2_inode *ip = gl->gl_object;
int meta = (flags & DIO_METADATA);
int data = (flags & DIO_DATA);
if (meta) {
gfs2_meta_inval(gl);
gl->gl_vn++;
if (ip)
set_bit(GIF_INVALID, &ip->i_flags);
}
if (ip && S_ISREG(ip->i_inode.i_mode)) {
truncate_inode_pages(ip->i_inode.i_mapping, 0);
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
clear_bit(GIF_PAGED, &ip->i_flags);
}
if (data)
gfs2_page_inval(gl);
}
/**
......@@ -351,11 +287,10 @@ static int inode_go_lock(struct gfs2_holder *gh)
if (!ip)
return 0;
if (ip->i_vn != gl->gl_vn) {
if (test_bit(GIF_INVALID, &ip->i_flags)) {
error = gfs2_inode_refresh(ip);
if (error)
return error;
gfs2_inode_attr_in(ip);
}
if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
......@@ -379,11 +314,8 @@ static void inode_go_unlock(struct gfs2_holder *gh)
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_inode *ip = gl->gl_object;
if (ip == NULL)
return;
if (test_bit(GLF_DIRTY, &gl->gl_flags))
gfs2_inode_attr_in(ip);
gfs2_meta_cache_flush(ip);
if (ip)
gfs2_meta_cache_flush(ip);
}
/**
......@@ -491,13 +423,13 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl;
struct gfs2_log_header head;
struct gfs2_log_header_host head;
int error;
if (gl->gl_state != LM_ST_UNLOCKED &&
test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
if (error)
......
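One detail worth noting in the reworked inode_go_sync() above: filemap_fdatawait() returns the mapping's writeback error and clears it, but the glock sync path has no user-space caller to report to, so the error is latched back into the mapping flags (AS_ENOSPC or AS_EIO) for a later fsync() to find. The shape of that pattern as a userspace sketch (hypothetical names; one int stands in for the address_space error bits):

#include <errno.h>
#include <stdio.h>

static int mapping_error;	/* 0, or a negative errno, like the AS_* bits */

/* analogue of filemap_fdatawait(): report and clear the latched error */
static int fdatawait(void)
{
	int err = mapping_error;
	mapping_error = 0;
	return err;
}

/* internal waiter that cannot report to user space: put the error back */
static void glock_sync_wait(void)
{
	int error = fdatawait();
	if (error == -ENOSPC)
		mapping_error = -ENOSPC;	/* set_bit(AS_ENOSPC, &mapping->flags) */
	else if (error)
		mapping_error = -EIO;		/* set_bit(AS_EIO, &mapping->flags) */
}

int main(void)
{
	mapping_error = -ENOSPC;	/* a writeback failure happened earlier */
	glock_sync_wait();		/* internal sync consumes it... and restores it */
	printf("fsync would now see %d\n", fdatawait());	/* -ENOSPC */
	return 0;
}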
......@@ -14,8 +14,6 @@
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
#define DIO_DATA 0x00000040
#define DIO_RELEASE 0x00000080
#define DIO_ALL 0x00000100
struct gfs2_log_operations;
......@@ -41,7 +39,7 @@ struct gfs2_log_operations {
void (*lo_before_commit) (struct gfs2_sbd *sdp);
void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
void (*lo_before_scan) (struct gfs2_jdesc *jd,
struct gfs2_log_header *head, int pass);
struct gfs2_log_header_host *head, int pass);
int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
struct gfs2_log_descriptor *ld, __be64 *ptr,
int pass);
......@@ -67,8 +65,8 @@ struct gfs2_rgrpd {
struct list_head rd_list_mru;
struct list_head rd_recent; /* Recently used rgrps */
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
struct gfs2_rindex rd_ri;
struct gfs2_rgrp rd_rg;
struct gfs2_rindex_host rd_ri;
struct gfs2_rgrp_host rd_rg;
u64 rd_rg_vn;
struct gfs2_bitmap *rd_bits;
unsigned int rd_bh_count;
......@@ -103,18 +101,17 @@ struct gfs2_bufdata {
};
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
int flags);
void (*go_xmote_bh) (struct gfs2_glock * gl);
void (*go_drop_th) (struct gfs2_glock * gl);
void (*go_drop_bh) (struct gfs2_glock * gl);
void (*go_sync) (struct gfs2_glock * gl, int flags);
void (*go_inval) (struct gfs2_glock * gl, int flags);
int (*go_demote_ok) (struct gfs2_glock * gl);
int (*go_lock) (struct gfs2_holder * gh);
void (*go_unlock) (struct gfs2_holder * gh);
void (*go_callback) (struct gfs2_glock * gl, unsigned int state);
void (*go_greedy) (struct gfs2_glock * gl);
void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags);
void (*go_xmote_bh) (struct gfs2_glock *gl);
void (*go_drop_th) (struct gfs2_glock *gl);
void (*go_drop_bh) (struct gfs2_glock *gl);
void (*go_sync) (struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
void (*go_greedy) (struct gfs2_glock *gl);
const int go_type;
};
......@@ -217,6 +214,7 @@ struct gfs2_alloc {
};
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_PAGED = 2,
GIF_SW_PAGED = 3,
......@@ -224,12 +222,11 @@ enum {
struct gfs2_inode {
struct inode i_inode;
struct gfs2_inum i_num;
struct gfs2_inum_host i_num;
unsigned long i_flags; /* GIF_... */
u64 i_vn;
struct gfs2_dinode i_di; /* To be replaced by ref to block */
struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
......@@ -450,7 +447,7 @@ struct gfs2_sbd {
struct super_block *sd_vfs_meta;
struct kobject sd_kobj;
unsigned long sd_flags; /* SDF_... */
struct gfs2_sb sd_sb;
struct gfs2_sb_host sd_sb;
/* Constants computed on mount */
......@@ -503,8 +500,8 @@ struct gfs2_sbd {
spinlock_t sd_statfs_spin;
struct mutex sd_statfs_mutex;
struct gfs2_statfs_change sd_statfs_master;
struct gfs2_statfs_change sd_statfs_local;
struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local;
unsigned long sd_statfs_sync_time;
/* Resource group stuff */
......
......@@ -22,13 +22,19 @@ static inline int gfs2_is_jdata(struct gfs2_inode *ip)
static inline int gfs2_is_dir(struct gfs2_inode *ip)
{
return S_ISDIR(ip->i_di.di_mode);
return S_ISDIR(ip->i_inode.i_mode);
}
static inline void gfs2_set_inode_blocks(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
inode->i_blocks = ip->i_di.di_blocks <<
(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
}
void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_inode_attr_out(struct gfs2_inode *ip);
struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type);
struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);
struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type);
struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum);
int gfs2_inode_refresh(struct gfs2_inode *ip);
......@@ -37,19 +43,15 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
int is_root, struct nameidata *nd);
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
unsigned int mode);
unsigned int mode, dev_t dev);
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip);
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip);
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
int gfs2_glock_nq_atime(struct gfs2_holder *gh);
int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
#endif /* __INODE_DOT_H__ */
......
......@@ -15,6 +15,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/delay.h>
#include "gfs2.h"
#include "incore.h"
......@@ -142,7 +143,7 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
return list_empty(&ai->ai_ail1_list);
}
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
struct list_head *head = &sdp->sd_ail1_list;
u64 sync_gen;
......@@ -261,6 +262,12 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
* @sdp: The GFS2 superblock
* @blks: The number of blocks to reserve
*
* Note that we never give out the last 6 blocks of the journal. That's
* due to the fact that there are a small number of header blocks
* associated with each log flush. The exact number can't be known until
* flush time, so we ensure that we have just enough free blocks at all
* times to avoid running out during a log flush.
*
* Returns: errno
*/
......@@ -274,7 +281,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
mutex_lock(&sdp->sd_log_reserve_mutex);
gfs2_log_lock(sdp);
while(sdp->sd_log_blks_free <= blks) {
while(sdp->sd_log_blks_free <= (blks + 6)) {
gfs2_log_unlock(sdp);
gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL);
......@@ -319,7 +326,8 @@ static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
bh_map.b_size = 1 << inode->i_blkbits;
error = gfs2_block_map(inode, lbn, 0, &bh_map);
if (error || !bh_map.b_blocknr)
printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn);
printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
(unsigned long long)bh_map.b_blocknr, lbn);
gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
return bh_map.b_blocknr;
......@@ -643,12 +651,9 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
up_read(&sdp->sd_log_flush_lock);
gfs2_log_lock(sdp);
if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
gfs2_log_unlock(sdp);
gfs2_log_flush(sdp, NULL);
} else {
gfs2_log_unlock(sdp);
}
if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
wake_up_process(sdp->sd_logd_process);
gfs2_log_unlock(sdp);
}
/**
......@@ -686,3 +691,21 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
up_write(&sdp->sd_log_flush_lock);
}
/**
* gfs2_meta_syncfs - sync all the buffers in a filesystem
* @sdp: the filesystem
*
*/
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
gfs2_log_flush(sdp, NULL);
for (;;) {
gfs2_ail1_start(sdp, DIO_ALL);
if (gfs2_ail1_empty(sdp, DIO_ALL))
break;
msleep(10);
}
}
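The new six-block headroom in gfs2_log_reserve() above accounts for the header blocks each log flush writes; since their exact count is unknown until flush time, the reservation loop simply refuses to hand out the journal's tail. In isolation the loop looks like this (userspace sketch; the kernel version sleeps and retries where this one just flushes):

#include <stdio.h>

#define LOG_HEADROOM 6	/* blocks held back for flush headers */

static unsigned int log_blks_free = 10;

/* analogue of freeing space via gfs2_log_flush() */
static void log_flush(void)
{
	log_blks_free += 4;	/* pretend a flush reclaimed some blocks */
	printf("flushed, %u blocks free\n", log_blks_free);
}

static void log_reserve(unsigned int blks)
{
	/* never let a reservation eat into the headroom */
	while (log_blks_free <= blks + LOG_HEADROOM)
		log_flush();
	log_blks_free -= blks;
}

int main(void)
{
	log_reserve(8);
	printf("reserved 8, %u free, headroom intact\n", log_blks_free);
	return 0;
}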
......@@ -48,7 +48,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
unsigned int ssize);
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
......@@ -61,5 +60,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
void gfs2_log_shutdown(struct gfs2_sbd *sdp);
void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
......@@ -182,7 +182,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header *head, int pass)
struct gfs2_log_header_host *head, int pass)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
......@@ -328,7 +328,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header *head, int pass)
struct gfs2_log_header_host *head, int pass)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
......@@ -509,7 +509,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
LIST_HEAD(started);
struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
struct buffer_head *bh = NULL;
struct buffer_head *bh = NULL,*bh1 = NULL;
unsigned int offset = sizeof(struct gfs2_log_descriptor);
struct gfs2_log_descriptor *ld;
unsigned int limit;
......@@ -537,8 +537,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
list_for_each_entry_safe_continue(bd1, bdt,
&sdp->sd_log_le_databuf,
bd_le.le_list) {
/* store off the buffer head in a local ptr since
* gfs2_bufdata might change when we drop the log lock
*/
bh1 = bd1->bd_bh;
/* An ordered write buffer */
if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
if (bh1 && !buffer_pinned(bh1)) {
list_move(&bd1->bd_le.le_list, &started);
if (bd1 == bd2) {
bd2 = NULL;
......@@ -547,20 +552,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
bd_le.le_list);
}
total_dbuf--;
if (bd1->bd_bh) {
get_bh(bd1->bd_bh);
if (buffer_dirty(bd1->bd_bh)) {
if (bh1) {
if (buffer_dirty(bh1)) {
get_bh(bh1);
gfs2_log_unlock(sdp);
wait_on_buffer(bd1->bd_bh);
ll_rw_block(WRITE, 1,
&bd1->bd_bh);
ll_rw_block(SWRITE, 1, &bh1);
brelse(bh1);
gfs2_log_lock(sdp);
}
brelse(bd1->bd_bh);
continue;
}
continue;
} else if (bd1->bd_bh) { /* A journaled buffer */
} else if (bh1) { /* A journaled buffer */
int magic;
gfs2_log_unlock(sdp);
if (!bh) {
......@@ -582,16 +588,16 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
ld->ld_data2 = cpu_to_be32(0);
memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
}
magic = gfs2_check_magic(bd1->bd_bh);
*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
magic = gfs2_check_magic(bh1);
*ptr++ = cpu_to_be64(bh1->b_blocknr);
*ptr++ = cpu_to_be64((__u64)magic);
clear_buffer_escaped(bd1->bd_bh);
clear_buffer_escaped(bh1);
if (unlikely(magic != 0))
set_buffer_escaped(bd1->bd_bh);
set_buffer_escaped(bh1);
gfs2_log_lock(sdp);
if (n++ > num)
break;
} else if (!bd1->bd_bh) {
} else if (!bh1) {
total_dbuf--;
sdp->sd_log_num_databuf--;
list_del_init(&bd1->bd_le.le_list);
......
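The bh1 local in databuf_lo_before_commit() above illustrates a general rule for walking a list under a lock that must be dropped mid-iteration: copy the pointer you need out of the shared structure and take your own reference before unlocking, because the structure (here the gfs2_bufdata) can change once the lock is gone. In miniature (userspace sketch with an explicit, non-atomic refcount; get_bh/brelse play this role in the kernel):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int refcount;
	int blocknr;
};

struct bufdata {
	struct buf *bd_bh;	/* may be changed by others once unlocked */
};

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void get_buf(struct buf *b) { b->refcount++; }

static void put_buf(struct buf *b)
{
	if (--b->refcount == 0)
		free(b);
}

static void write_out(struct bufdata *bd)
{
	pthread_mutex_lock(&log_lock);
	/* copy to a local and take a ref *before* dropping the lock */
	struct buf *bh1 = bd->bd_bh;
	if (bh1)
		get_buf(bh1);
	pthread_mutex_unlock(&log_lock);

	/* bd->bd_bh may change now; bh1 stays valid via our reference */
	if (bh1) {
		printf("writing block %d\n", bh1->blocknr);
		put_buf(bh1);
	}
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));
	b->refcount = 1;
	b->blocknr = 42;
	struct bufdata bd = { .bd_bh = b };
	write_out(&bd);
	put_buf(b);
	return 0;
}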
......@@ -60,7 +60,7 @@ static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
}
static inline void lops_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header *head,
struct gfs2_log_header_host *head,
unsigned int pass)
{
int x;
......
......@@ -127,17 +127,17 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
/**
* getbuf - Get a buffer with a given address space
* @sdp: the filesystem
* @aspace: the address space
* @gl: the glock
* @blkno: the block number (filesystem scope)
* @create: 1 if the buffer should be created
*
* Returns: the buffer
*/
static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
u64 blkno, int create)
static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
struct address_space *mapping = gl->gl_aspace->i_mapping;
struct gfs2_sbd *sdp = gl->gl_sbd;
struct page *page;
struct buffer_head *bh;
unsigned int shift;
......@@ -150,13 +150,13 @@ static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
if (create) {
for (;;) {
page = grab_cache_page(aspace->i_mapping, index);
page = grab_cache_page(mapping, index);
if (page)
break;
yield();
}
} else {
page = find_lock_page(aspace->i_mapping, index);
page = find_lock_page(mapping, index);
if (!page)
return NULL;
}
......@@ -202,7 +202,7 @@ static void meta_prep_new(struct buffer_head *bh)
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
struct buffer_head *bh;
bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
bh = getbuf(gl, blkno, CREATE);
meta_prep_new(bh);
return bh;
}
......@@ -220,7 +220,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head **bhp)
{
*bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
*bhp = getbuf(gl, blkno, CREATE);
if (!buffer_uptodate(*bhp))
ll_rw_block(READ_META, 1, bhp);
if (flags & DIO_WAIT) {
......@@ -379,11 +379,10 @@ void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct inode *aspace = ip->i_gl->gl_aspace;
struct buffer_head *bh;
while (blen) {
bh = getbuf(sdp, aspace, bstart, NO_CREATE);
bh = getbuf(ip->i_gl, bstart, NO_CREATE);
if (bh) {
struct gfs2_bufdata *bd = bh->b_private;
......@@ -472,6 +471,9 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
int in_cache = 0;
BUG_ON(!gl);
BUG_ON(!sdp);
spin_lock(&ip->i_spin);
if (*bh_slot && (*bh_slot)->b_blocknr == num) {
bh = *bh_slot;
......@@ -481,7 +483,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
spin_unlock(&ip->i_spin);
if (!bh)
bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);
bh = getbuf(gl, num, CREATE);
if (!bh)
return -ENOBUFS;
......@@ -532,7 +534,6 @@ err:
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct inode *aspace = gl->gl_aspace;
struct buffer_head *first_bh, *bh;
u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
sdp->sd_sb.sb_bsize_shift;
......@@ -544,7 +545,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (extlen > max_ra)
extlen = max_ra;
first_bh = getbuf(sdp, aspace, dblock, CREATE);
first_bh = getbuf(gl, dblock, CREATE);
if (buffer_uptodate(first_bh))
goto out;
......@@ -555,7 +556,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
extlen--;
while (extlen) {
bh = getbuf(sdp, aspace, dblock, CREATE);
bh = getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
ll_rw_block(READA, 1, &bh);
......@@ -571,20 +572,3 @@ out:
return first_bh;
}
/**
* gfs2_meta_syncfs - sync all the buffers in a filesystem
* @sdp: the filesystem
*
*/
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
gfs2_log_flush(sdp, NULL);
for (;;) {
gfs2_ail1_start(sdp, DIO_ALL);
if (gfs2_ail1_empty(sdp, DIO_ALL))
break;
msleep(10);
}
}
......@@ -67,7 +67,6 @@ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
}
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
......
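The getbuf() rework above is a plain API simplification: the glock already carries both the superblock and the address space, so passing it alone removes two parameters that could disagree with each other. Schematically (hypothetical types mirroring the before/after signatures):

#include <stdio.h>

struct sbd { int blocksize; };
struct mapping { const char *name; };

/* the handle aggregates all the context the callee needs */
struct glock {
	struct sbd *gl_sbd;
	struct mapping *gl_mapping;
};

/* before: getbuf(sdp, aspace, blkno, create) — three things to pass,
 * two of them derivable from the glock (and possibly inconsistent) */

/* after: one handle, everything derived inside */
static void getbuf(struct glock *gl, unsigned long long blkno)
{
	printf("block %llu via %s, bs=%d\n", blkno,
	       gl->gl_mapping->name, gl->gl_sbd->blocksize);
}

int main(void)
{
	struct sbd sdp = { 4096 };
	struct mapping map = { "meta" };
	struct glock gl = { &sdp, &map };
	getbuf(&gl, 7);
	return 0;
}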
......@@ -156,19 +156,6 @@ out_ignore:
return 0;
}
static int zero_readpage(struct page *page)
{
void *kaddr;
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
SetPageUptodate(page);
return 0;
}
/**
* stuffed_readpage - Fill in a Linux page with stuffed file data
* @ip: the inode
......@@ -183,9 +170,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
void *kaddr;
int error;
/* Only the first page of a stuffed file might contain data */
if (unlikely(page->index))
return zero_readpage(page);
BUG_ON(page->index);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
......@@ -230,9 +215,9 @@ static int gfs2_readpage(struct file *file, struct page *page)
/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
goto skip_lock;
}
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
do_unlock = 1;
error = gfs2_glock_nq_m_atime(1, &gh);
error = gfs2_glock_nq_atime(&gh);
if (unlikely(error))
goto out_unlock;
}
......@@ -254,6 +239,8 @@ skip_lock:
out:
return error;
out_unlock:
if (error == GLR_TRYFAILED)
error = AOP_TRUNCATED_PAGE;
unlock_page(page);
if (do_unlock)
gfs2_holder_uninit(&gh);
......@@ -293,9 +280,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
goto skip_lock;
}
gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
LM_FLAG_TRY_1CB|GL_ATIME, &gh);
do_unlock = 1;
ret = gfs2_glock_nq_m_atime(1, &gh);
ret = gfs2_glock_nq_atime(&gh);
if (ret == GLR_TRYFAILED)
goto out_noerror;
if (unlikely(ret))
......@@ -366,10 +353,13 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
unsigned int write_len = to - from;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
if (error)
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
error = gfs2_glock_nq_atime(&ip->i_gh);
if (unlikely(error)) {
if (error == GLR_TRYFAILED)
error = AOP_TRUNCATED_PAGE;
goto out_uninit;
}
gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
......@@ -386,7 +376,7 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
if (error)
goto out_alloc_put;
error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
if (error)
goto out_qunlock;
......@@ -482,8 +472,10 @@ static int gfs2_commit_write(struct file *file, struct page *page,
SetPageUptodate(page);
if (inode->i_size < file_size)
if (inode->i_size < file_size) {
i_size_write(inode, file_size);
mark_inode_dirty(inode);
}
} else {
if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
gfs2_is_jdata(ip))
......@@ -498,11 +490,6 @@ static int gfs2_commit_write(struct file *file, struct page *page,
di->di_size = cpu_to_be64(inode->i_size);
}
di->di_mode = cpu_to_be32(inode->i_mode);
di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
brelse(dibh);
gfs2_trans_end(sdp);
if (al->al_requested) {
......@@ -624,7 +611,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
* on this path. All we need change is atime.
*/
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
rv = gfs2_glock_nq_m_atime(1, &gh);
rv = gfs2_glock_nq_atime(&gh);
if (rv)
goto out;
......@@ -737,6 +724,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (!atomic_read(&aspace->i_writecount))
return 0;
if (!(gfp_mask & __GFP_WAIT))
return 0;
if (time_after_eq(jiffies, t)) {
stuck_releasepage(bh);
/* should we withdraw here? */
......
......@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *inode = dentry->d_inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip;
struct gfs2_inum inum;
struct gfs2_inum_host inum;
unsigned int type;
int error;
......@@ -76,7 +76,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
if (!gfs2_inum_equal(&ip->i_num, &inum))
goto invalid_gunlock;
if (IF2DT(ip->i_di.di_mode) != type) {
if (IF2DT(ip->i_inode.i_mode) != type) {
gfs2_consist_inode(dip);
goto fail_gunlock;
}
......
......@@ -15,7 +15,7 @@
extern struct export_operations gfs2_export_ops;
struct gfs2_fh_obj {
struct gfs2_inum this;
struct gfs2_inum_host this;
__u32 imode;
};
......
......@@ -17,7 +17,7 @@ extern struct file gfs2_internal_file_sentinel;
extern int gfs2_internal_read(struct gfs2_inode *ip,
struct file_ra_state *ra_state,
char *buf, loff_t *pos, unsigned size);
extern void gfs2_set_inode_flags(struct inode *inode);
extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;
......
......@@ -237,7 +237,7 @@ fail:
}
static struct inode *gfs2_lookup_root(struct super_block *sb,
struct gfs2_inum *inum)
struct gfs2_inum_host *inum)
{
return gfs2_inode_lookup(sb, inum, DT_DIR);
}
......@@ -246,7 +246,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_holder sb_gh;
struct gfs2_inum *inum;
struct gfs2_inum_host *inum;
struct inode *inode;
int error = 0;
......