Commit 301933a0 authored by Trond Myklebust

Merge commit 'linux-pnfs/nfs41-for-2.6.31' into nfsv41-for-2.6.31

parents 3fe0344f 68f3f901
......@@ -74,6 +74,15 @@ config NFS_V4
If unsure, say N.
config NFS_V4_1
bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
depends on NFS_V4 && EXPERIMENTAL
help
This option enables support for minor version 1 of the NFSv4 protocol
(draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
Unless you're an NFS developer, say N.
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
......
......@@ -17,6 +17,9 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
#if defined(CONFIG_NFS_V4_1)
#include <linux/sunrpc/bc_xprt.h>
#endif
#include <net/inet_sock.h>
......@@ -28,11 +31,12 @@
struct nfs_callback_data {
unsigned int users;
struct svc_serv *serv;
struct svc_rqst *rqst;
struct task_struct *task;
};
static struct nfs_callback_data nfs_callback_info;
static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
......@@ -56,10 +60,10 @@ module_param_call(callback_tcpport, param_set_port, param_get_int,
&nfs_callback_set_tcpport, 0644);
/*
* This is the callback kernel thread.
* This is the NFSv4 callback kernel thread.
*/
static int
nfs_callback_svc(void *vrqstp)
nfs4_callback_svc(void *vrqstp)
{
int err, preverr = 0;
struct svc_rqst *rqstp = vrqstp;
......@@ -97,20 +101,12 @@ nfs_callback_svc(void *vrqstp)
}
/*
* Bring up the callback thread if it is not already up.
* Prepare to bring up the NFSv4 callback service
*/
int nfs_callback_up(void)
struct svc_rqst *
nfs4_callback_up(struct svc_serv *serv)
{
struct svc_serv *serv = NULL;
int ret = 0;
mutex_lock(&nfs_callback_mutex);
if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
goto out;
serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
ret = -ENOMEM;
if (!serv)
goto out_err;
int ret;
ret = svc_create_xprt(serv, "tcp", PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
......@@ -131,23 +127,168 @@ int nfs_callback_up(void)
goto out_err;
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
if (IS_ERR(nfs_callback_info.rqst)) {
ret = PTR_ERR(nfs_callback_info.rqst);
nfs_callback_info.rqst = NULL;
return svc_prepare_thread(serv, &serv->sv_pools[0]);
out_err:
if (ret == 0)
ret = -ENOMEM;
return ERR_PTR(ret);
}
#if defined(CONFIG_NFS_V4_1)
/*
* The callback service for NFSv4.1 callbacks
*/
static int
nfs41_callback_svc(void *vrqstp)
{
struct svc_rqst *rqstp = vrqstp;
struct svc_serv *serv = rqstp->rq_server;
struct rpc_rqst *req;
int error;
DEFINE_WAIT(wq);
set_freezable();
/*
* FIXME: do we really need to run this under the BKL? If so, please
* add a comment about what it's intended to protect.
*/
lock_kernel();
while (!kthread_should_stop()) {
prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
struct rpc_rqst, rq_bc_list);
list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
dprintk("Invoking bc_svc_process()\n");
error = bc_svc_process(serv, req, rqstp);
dprintk("bc_svc_process() returned w/ error code= %d\n",
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
schedule();
}
finish_wait(&serv->sv_cb_waitq, &wq);
}
unlock_kernel();
return 0;
}
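The queueing side of this consumer loop lives in the sunrpc backchannel code, outside this diff. A minimal sketch of what the matching producer is assumed to look like, using only the sv_cb_* fields this series adds to struct svc_serv (the function name is illustrative, not a real kernel symbol):

static void queue_backchannel_request(struct svc_serv *serv,
				      struct rpc_rqst *req)
{
	/* Queue the callback request and wake nfs41_callback_svc() */
	spin_lock_bh(&serv->sv_cb_lock);
	list_add_tail(&req->rq_bc_list, &serv->sv_cb_list);
	wake_up(&serv->sv_cb_waitq);
	spin_unlock_bh(&serv->sv_cb_lock);
}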
/*
* Bring up the NFSv4.1 callback service
*/
struct svc_rqst *
nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
{
struct svc_xprt *bc_xprt;
struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
dprintk("--> %s\n", __func__);
/* Create a svc_sock for the service */
bc_xprt = svc_sock_create(serv, xprt->prot);
if (!bc_xprt)
goto out;
/*
* Save the svc_serv in the transport so that it can
* be referenced when the session backchannel is initialized
*/
serv->bc_xprt = bc_xprt;
xprt->bc_serv = serv;
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
init_waitqueue_head(&serv->sv_cb_waitq);
rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
if (IS_ERR(rqstp))
svc_sock_destroy(bc_xprt);
out:
dprintk("--> %s return %p\n", __func__, rqstp);
return rqstp;
}
static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
struct svc_serv *serv, struct rpc_xprt *xprt,
struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
{
if (minorversion) {
*rqstpp = nfs41_callback_up(serv, xprt);
*callback_svc = nfs41_callback_svc;
}
return minorversion;
}
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct nfs_callback_data *cb_info)
{
if (minorversion)
xprt->bc_serv = cb_info->serv;
}
#else
static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
struct svc_serv *serv, struct rpc_xprt *xprt,
struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
{
return 0;
}
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct nfs_callback_data *cb_info)
{
}
#endif /* CONFIG_NFS_V4_1 */
/*
* Bring up the callback thread if it is not already up.
*/
int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
{
struct svc_serv *serv = NULL;
struct svc_rqst *rqstp;
int (*callback_svc)(void *vrqstp);
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
char svc_name[12];
int ret = 0;
int minorversion_setup;
mutex_lock(&nfs_callback_mutex);
if (cb_info->users++ || cb_info->task != NULL) {
nfs_callback_bc_serv(minorversion, xprt, cb_info);
goto out;
}
serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
if (!serv) {
ret = -ENOMEM;
goto out_err;
}
minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
serv, xprt, &rqstp, &callback_svc);
if (!minorversion_setup) {
/* v4.0 callback setup */
rqstp = nfs4_callback_up(serv);
callback_svc = nfs4_callback_svc;
}
if (IS_ERR(rqstp)) {
ret = PTR_ERR(rqstp);
goto out_err;
}
svc_sock_update_bufs(serv);
nfs_callback_info.task = kthread_run(nfs_callback_svc,
nfs_callback_info.rqst,
"nfsv4-svc");
if (IS_ERR(nfs_callback_info.task)) {
ret = PTR_ERR(nfs_callback_info.task);
svc_exit_thread(nfs_callback_info.rqst);
nfs_callback_info.rqst = NULL;
nfs_callback_info.task = NULL;
sprintf(svc_name, "nfsv4.%u-svc", minorversion);
cb_info->serv = serv;
cb_info->rqst = rqstp;
cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
if (IS_ERR(cb_info->task)) {
ret = PTR_ERR(cb_info->task);
svc_exit_thread(cb_info->rqst);
cb_info->rqst = NULL;
cb_info->task = NULL;
goto out_err;
}
out:
......@@ -164,22 +305,25 @@ out:
out_err:
dprintk("NFS: Couldn't create callback socket or server thread; "
"err = %d\n", ret);
nfs_callback_info.users--;
cb_info->users--;
goto out;
}
/*
* Kill the callback thread if it's no longer being used.
*/
void nfs_callback_down(void)
void nfs_callback_down(int minorversion)
{
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
mutex_lock(&nfs_callback_mutex);
nfs_callback_info.users--;
if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
kthread_stop(nfs_callback_info.task);
svc_exit_thread(nfs_callback_info.rqst);
nfs_callback_info.rqst = NULL;
nfs_callback_info.task = NULL;
cb_info->users--;
if (cb_info->users == 0 && cb_info->task != NULL) {
kthread_stop(cb_info->task);
svc_exit_thread(cb_info->rqst);
cb_info->serv = NULL;
cb_info->rqst = NULL;
cb_info->task = NULL;
}
mutex_unlock(&nfs_callback_mutex);
}
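The users count makes bring-up and tear-down refcounted per minor version. A sketch of the expected pairing from a caller's point of view (illustrative function; on failure nfs_callback_up() has already rolled its users count back):

int example_callback_lifetime(u32 minorversion, struct rpc_xprt *xprt)
{
	int err = nfs_callback_up(minorversion, xprt);

	if (err < 0)
		return err;
	/* ... the client holds its reference for its lifetime ... */
	nfs_callback_down(minorversion);
	return 0;
}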
......
......@@ -20,13 +20,24 @@ enum nfs4_callback_procnum {
enum nfs4_callback_opnum {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
/* Callback operations new to NFSv4.1 */
OP_CB_LAYOUTRECALL = 5,
OP_CB_NOTIFY = 6,
OP_CB_PUSH_DELEG = 7,
OP_CB_RECALL_ANY = 8,
OP_CB_RECALLABLE_OBJ_AVAIL = 9,
OP_CB_RECALL_SLOT = 10,
OP_CB_SEQUENCE = 11,
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_ILLEGAL = 10044,
};
struct cb_compound_hdr_arg {
unsigned int taglen;
const char *tag;
unsigned int callback_ident;
unsigned int minorversion;
unsigned nops;
};
......@@ -59,16 +70,59 @@ struct cb_recallargs {
uint32_t truncate;
};
#if defined(CONFIG_NFS_V4_1)
struct referring_call {
uint32_t rc_sequenceid;
uint32_t rc_slotid;
};
struct referring_call_list {
struct nfs4_sessionid rcl_sessionid;
uint32_t rcl_nrefcalls;
struct referring_call *rcl_refcalls;
};
struct cb_sequenceargs {
struct sockaddr *csa_addr;
struct nfs4_sessionid csa_sessionid;
uint32_t csa_sequenceid;
uint32_t csa_slotid;
uint32_t csa_highestslotid;
uint32_t csa_cachethis;
uint32_t csa_nrclists;
struct referring_call_list *csa_rclists;
};
struct cb_sequenceres {
__be32 csr_status;
struct nfs4_sessionid csr_sessionid;
uint32_t csr_sequenceid;
uint32_t csr_slotid;
uint32_t csr_highestslotid;
uint32_t csr_target_highestslotid;
};
extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
struct cb_sequenceres *res);
#endif /* CONFIG_NFS_V4_1 */
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
extern int nfs_callback_up(void);
extern void nfs_callback_down(void);
#else
#define nfs_callback_up() (0)
#define nfs_callback_down() do {} while(0)
#endif
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
extern void nfs_callback_down(int minorversion);
#endif /* CONFIG_NFS_V4 */
/*
* nfs41: Callbacks are expected not to cause substantial latency,
* so we limit their concurrency to 1 by capping the number of
* backchannel slots at 1.
*/
#define NFS41_BC_MIN_CALLBACKS 1
#define NFS41_BC_MAX_CALLBACKS 1
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
......
......@@ -101,3 +101,130 @@ out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
}
#if defined(CONFIG_NFS_V4_1)
/*
* Validate the sequenceID sent by the server.
* Return success if the sequenceID is one more than what we last saw on
* this slot, accounting for wraparound; on success, the slot's sequence
* number is incremented.
*
* We don't yet implement a duplicate request cache, so at this time
* we will log replays and process them as if we had not seen them before,
* but we don't bump the sequence in the slot. This is acceptable for now,
* since we currently implement only idempotent callbacks.
*
* We have a single-slot backchannel at this time, so we don't bother
* checking the used_slots bit array on the table. The lower layer guarantees
* a single outstanding callback request at a time.
*/
static int
validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
{
struct nfs4_slot *slot;
dprintk("%s enter. slotid %d seqid %d\n",
__func__, slotid, seqid);
if (slotid >= NFS41_BC_MAX_CALLBACKS)	/* slotids are 0-based */
return htonl(NFS4ERR_BADSLOT);
slot = tbl->slots + slotid;
dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
/* Normal */
if (likely(seqid == slot->seq_nr + 1)) {
slot->seq_nr++;
return htonl(NFS4_OK);
}
/* Replay */
if (seqid == slot->seq_nr) {
dprintk("%s seqid %d is a replay - no DRC available\n",
__func__, seqid);
return htonl(NFS4_OK);
}
/* Wraparound */
if (seqid == 1 && (slot->seq_nr + 1) == 0) {
slot->seq_nr = 1;
return htonl(NFS4_OK);
}
/* Misordered request */
return htonl(NFS4ERR_SEQ_MISORDERED);
}
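To make the replay and wraparound rules concrete, here is a hypothetical user-space model of the same checks; a sketch for illustration only, not kernel code, relying on uint32_t arithmetic to emulate the seq_nr wrap:

#include <stdio.h>
#include <stdint.h>

enum seq_outcome { SEQ_OK_NEXT, SEQ_OK_REPLAY, SEQ_OK_WRAP, SEQ_MISORDERED };

static enum seq_outcome check_seqid(uint32_t *slot_seq, uint32_t seqid)
{
	if (seqid == *slot_seq + 1) {		/* normal: advance the slot */
		(*slot_seq)++;
		return SEQ_OK_NEXT;
	}
	if (seqid == *slot_seq)			/* replay: no DRC, no bump */
		return SEQ_OK_REPLAY;
	if (seqid == 1 && *slot_seq + 1 == 0) {	/* 0xffffffff wraps to 1 */
		*slot_seq = 1;
		return SEQ_OK_WRAP;
	}
	return SEQ_MISORDERED;			/* NFS4ERR_SEQ_MISORDERED */
}

int main(void)
{
	uint32_t seq = 0xfffffffe;

	printf("%d\n", check_seqid(&seq, 0xffffffff));	/* SEQ_OK_NEXT */
	printf("%d\n", check_seqid(&seq, 0xffffffff));	/* SEQ_OK_REPLAY */
	printf("%d\n", check_seqid(&seq, 1));		/* SEQ_OK_WRAP */
	printf("%d\n", check_seqid(&seq, 5));		/* SEQ_MISORDERED */
	return 0;
}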
/*
* Returns a pointer to a held 'struct nfs_client' that matches the server's
* address, major version number, and session ID. It is the caller's
* responsibility to release the returned reference.
*
* Returns NULL if there are no connections with sessions, or if no session
* matches the one of interest.
*/
static struct nfs_client *find_client_with_session(
const struct sockaddr *addr, u32 nfsversion,
struct nfs4_sessionid *sessionid)
{
struct nfs_client *clp;
clp = nfs_find_client(addr, 4);
if (clp == NULL)
return NULL;
do {
struct nfs_client *prev = clp;
if (clp->cl_session != NULL) {
if (memcmp(clp->cl_session->sess_id.data,
sessionid->data,
NFS4_MAX_SESSIONID_LEN) == 0) {
/* Returns a held reference to clp */
return clp;
}
}
clp = nfs_find_client_next(prev);
nfs_put_client(prev);
} while (clp != NULL);
return NULL;
}
/* FIXME: referring calls should be processed */
unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
struct cb_sequenceres *res)
{
struct nfs_client *clp;
int i, status;
for (i = 0; i < args->csa_nrclists; i++)
kfree(args->csa_rclists[i].rcl_refcalls);
kfree(args->csa_rclists);
status = htonl(NFS4ERR_BADSESSION);
clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
if (clp == NULL)
goto out;
status = validate_seqid(&clp->cl_session->bc_slot_table,
args->csa_slotid, args->csa_sequenceid);
if (status)
goto out_putclient;
memcpy(&res->csr_sessionid, &args->csa_sessionid,
sizeof(res->csr_sessionid));
res->csr_sequenceid = args->csa_sequenceid;
res->csr_slotid = args->csa_slotid;
res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
out_putclient:
nfs_put_client(clp);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
res->csr_status = status;
return res->csr_status;
}
#endif /* CONFIG_NFS_V4_1 */
......@@ -20,6 +20,11 @@
2 + 2 + 3 + 3)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#if defined(CONFIG_NFS_V4_1)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
4 + 1 + 3)
#endif /* CONFIG_NFS_V4_1 */
#define NFSDBG_FACILITY NFSDBG_CALLBACK
typedef __be32 (*callback_process_op_t)(void *, void *);
......@@ -132,7 +137,6 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
__be32 *p;
unsigned int minor_version;
__be32 status;
status = decode_string(xdr, &hdr->taglen, &hdr->tag);
......@@ -147,15 +151,19 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
p = read_buf(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
minor_version = ntohl(*p++);
/* Check minor version is zero. */
if (minor_version != 0) {
printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
__func__, minor_version);
hdr->minorversion = ntohl(*p++);
/* Check minor version is zero or one. */
if (hdr->minorversion <= 1) {
p++; /* skip callback_ident */
} else {
printk(KERN_WARNING "%s: NFSv4 server callback with "
"illegal minor version %u!\n",
__func__, hdr->minorversion);
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
hdr->callback_ident = ntohl(*p++);
hdr->nops = ntohl(*p);
dprintk("%s: minorversion %d nops %d\n", __func__,
hdr->minorversion, hdr->nops);
return 0;
}
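For reference, the CB_COMPOUND header consumed above has the following logical XDR layout. This is an illustrative comment block, not a memory-mappable struct: every scalar is a big-endian 32-bit word, and the tag is a variable-length opaque padded to a 4-byte boundary:

/*
 *   tag             opaque<>  (length word + padded bytes, see decode_string)
 *   minorversion    u32       (must be 0 or 1 here)
 *   callback_ident  u32       (read and discarded above)
 *   nops            u32       (number of operations that follow)
 */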
......@@ -204,6 +212,122 @@ out:
return status;
}
#if defined(CONFIG_NFS_V4_1)
static unsigned decode_sessionid(struct xdr_stream *xdr,
struct nfs4_sessionid *sid)
{
uint32_t *p;
int len = NFS4_MAX_SESSIONID_LEN;
p = read_buf(xdr, len);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(sid->data, p, len);
return 0;
}
static unsigned decode_rc_list(struct xdr_stream *xdr,
struct referring_call_list *rc_list)
{
uint32_t *p;
int i;
unsigned status;
status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
if (status)
goto out;
status = htonl(NFS4ERR_RESOURCE);
p = read_buf(xdr, sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
rc_list->rcl_nrefcalls = ntohl(*p++);
if (rc_list->rcl_nrefcalls) {
p = read_buf(xdr,
rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
sizeof(*rc_list->rcl_refcalls),
GFP_KERNEL);
if (unlikely(rc_list->rcl_refcalls == NULL))
goto out;
for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
}
}
status = 0;
out:
return status;
}
static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
struct cb_sequenceargs *args)
{
uint32_t *p;
int i;
unsigned status;
status = decode_sessionid(xdr, &args->csa_sessionid);
if (status)
goto out;
status = htonl(NFS4ERR_RESOURCE);
p = read_buf(xdr, 5 * sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
args->csa_addr = svc_addr(rqstp);
args->csa_sequenceid = ntohl(*p++);
args->csa_slotid = ntohl(*p++);
args->csa_highestslotid = ntohl(*p++);
args->csa_cachethis = ntohl(*p++);
args->csa_nrclists = ntohl(*p++);
args->csa_rclists = NULL;
if (args->csa_nrclists) {
/* zeroed so a partial decode failure leaves NULL rcl_refcalls
 * pointers for the cleanup loop at out_free */
args->csa_rclists = kzalloc(args->csa_nrclists *
sizeof(*args->csa_rclists),
GFP_KERNEL);
if (unlikely(args->csa_rclists == NULL))
goto out;
for (i = 0; i < args->csa_nrclists; i++) {
status = decode_rc_list(xdr, &args->csa_rclists[i]);
if (status)
goto out_free;
}
}
status = 0;
dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
"highestslotid %u cachethis %d nrclists %u\n",
__func__,
((u32 *)&args->csa_sessionid)[0],
((u32 *)&args->csa_sessionid)[1],
((u32 *)&args->csa_sessionid)[2],
((u32 *)&args->csa_sessionid)[3],
args->csa_sequenceid, args->csa_slotid,
args->csa_highestslotid, args->csa_cachethis,
args->csa_nrclists);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
out_free:
for (i = 0; i < args->csa_nrclists; i++)
kfree(args->csa_rclists[i].rcl_refcalls);
kfree(args->csa_rclists);
goto out;
}
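Similarly, the CB_SEQUENCE argument stream decoded above has this logical XDR shape (illustrative comment block; every scalar is a big-endian 32-bit word):

/*
 *   sessionid        16 bytes (NFS4_MAX_SESSIONID_LEN)
 *   sequenceid       u32
 *   slotid           u32
 *   highest_slotid   u32
 *   cachethis        u32 (boolean)
 *   nrclists         u32, then per referring-call list:
 *       sessionid    16 bytes
 *       nrefcalls    u32, then nrefcalls pairs of:
 *           sequenceid  u32
 *           slotid      u32
 */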
#endif /* CONFIG_NFS_V4_1 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
__be32 *p;
......@@ -353,31 +477,134 @@ out:
return status;
}
static __be32 process_op(struct svc_rqst *rqstp,
#if defined(CONFIG_NFS_V4_1)
static unsigned encode_sessionid(struct xdr_stream *xdr,
const struct nfs4_sessionid *sid)
{
uint32_t *p;
int len = NFS4_MAX_SESSIONID_LEN;
p = xdr_reserve_space(xdr, len);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(p, sid, len);
return 0;
}
static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
const struct cb_sequenceres *res)
{
uint32_t *p;
unsigned status = res->csr_status;
if (unlikely(status != 0))
goto out;
encode_sessionid(xdr, &res->csr_sessionid);
p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
*p++ = htonl(res->csr_sequenceid);
*p++ = htonl(res->csr_slotid);
*p++ = htonl(res->csr_highestslotid);
*p++ = htonl(res->csr_target_highestslotid);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
if (op_nr == OP_CB_SEQUENCE) {
if (nop != 0)
return htonl(NFS4ERR_SEQUENCE_POS);
} else {
if (nop == 0)
return htonl(NFS4ERR_OP_NOT_IN_SESSION);
}
switch (op_nr) {
case OP_CB_GETATTR:
case OP_CB_RECALL:
case OP_CB_SEQUENCE:
*op = &callback_ops[op_nr];
break;
case OP_CB_LAYOUTRECALL:
case OP_CB_NOTIFY_DEVICEID:
case OP_CB_NOTIFY:
case OP_CB_PUSH_DELEG:
case OP_CB_RECALL_ANY:
case OP_CB_RECALLABLE_OBJ_AVAIL:
case OP_CB_RECALL_SLOT:
case OP_CB_WANTS_CANCELLED:
case OP_CB_NOTIFY_LOCK:
return htonl(NFS4ERR_NOTSUPP);
default:
return htonl(NFS4ERR_OP_ILLEGAL);
}
return htonl(NFS_OK);
}
#else /* CONFIG_NFS_V4_1 */
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
#endif /* CONFIG_NFS_V4_1 */
static __be32
preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
{
switch (op_nr) {
case OP_CB_GETATTR:
case OP_CB_RECALL:
*op = &callback_ops[op_nr];
break;
default:
return htonl(NFS4ERR_OP_ILLEGAL);
}
return htonl(NFS_OK);
}
static __be32 process_op(uint32_t minorversion, int nop,
struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
struct xdr_stream *xdr_out, void *resp)
{
struct callback_op *op = &callback_ops[0];
unsigned int op_nr = OP_CB_ILLEGAL;
__be32 status = 0;
__be32 status;
long maxlen;
__be32 res;
dprintk("%s: start\n", __func__);
status = decode_op_hdr(xdr_in, &op_nr);
if (likely(status == 0)) {
switch (op_nr) {
case OP_CB_GETATTR:
case OP_CB_RECALL:
op = &callback_ops[op_nr];
break;
default:
op_nr = OP_CB_ILLEGAL;
op = &callback_ops[0];
if (unlikely(status)) {
status = htonl(NFS4ERR_OP_ILLEGAL);
goto out;
}
}
dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
__func__, minorversion, nop, op_nr);
status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) :
preprocess_nfs4_op(op_nr, &op);
if (status == htonl(NFS4ERR_OP_ILLEGAL))
op_nr = OP_CB_ILLEGAL;
out:
maxlen = xdr_out->end - xdr_out->p;
if (maxlen > 0 && maxlen < PAGE_SIZE) {
if (likely(status == 0 && op->decode_args != NULL))
......@@ -425,7 +652,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
return rpc_system_err;
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
status = process_op(hdr_arg.minorversion, nops,
rqstp, &xdr_in, argp, &xdr_out, resp);
nops++;
}
......@@ -452,7 +680,15 @@ static struct callback_op callback_ops[] = {
.process_op = (callback_process_op_t)nfs4_callback_recall,
.decode_args = (callback_decode_arg_t)decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
}
},
#if defined(CONFIG_NFS_V4_1)
[OP_CB_SEQUENCE] = {
.process_op = (callback_process_op_t)nfs4_callback_sequence,
.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
.encode_res = (callback_encode_res_t)encode_cb_sequence_res,
.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
},
#endif /* CONFIG_NFS_V4_1 */
};
/*
......
......@@ -37,6 +37,7 @@
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/bc_xprt.h>
#include <asm/system.h>
......@@ -102,6 +103,7 @@ struct nfs_client_initdata {
size_t addrlen;
const struct nfs_rpc_ops *rpc_ops;
int proto;
u32 minorversion;
};
/*
......@@ -120,12 +122,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->rpc_ops = cl_init->rpc_ops;
if (cl_init->rpc_ops->version == 4) {
if (nfs_callback_up() < 0)
goto error_2;
__set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
}
atomic_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
......@@ -135,7 +131,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
if (cl_init->hostname) {
clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
if (!clp->cl_hostname)
goto error_3;
goto error_cleanup;
}
INIT_LIST_HEAD(&clp->cl_superblocks);
......@@ -150,6 +146,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_minorversion = cl_init->minorversion;
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
......@@ -159,10 +156,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
return clp;
error_3:
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
nfs_callback_down();
error_2:
error_cleanup:
kfree(clp);
error_0:
return NULL;
......@@ -181,6 +175,35 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
#endif
}
/*
* Destroy the NFS4 callback service
*/
static void nfs4_destroy_callback(struct nfs_client *clp)
{
#ifdef CONFIG_NFS_V4
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
nfs_callback_down(clp->cl_minorversion);
#endif /* CONFIG_NFS_V4 */
}
/*
* Clears/puts all minor-version-specific parts of an nfs_client struct,
* reverting it to minorversion 0.
*/
static void nfs4_clear_client_minor_version(struct nfs_client *clp)
{
#ifdef CONFIG_NFS_V4_1
if (nfs4_has_session(clp)) {
nfs4_destroy_session(clp->cl_session);
clp->cl_session = NULL;
}
clp->cl_call_sync = _nfs4_call_sync;
#endif /* CONFIG_NFS_V4_1 */
nfs4_destroy_callback(clp);
}
/*
* Destroy a shared client record
*/
......@@ -188,6 +211,7 @@ static void nfs_free_client(struct nfs_client *clp)
{
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
nfs4_clear_client_minor_version(clp);
nfs4_shutdown_client(clp);
nfs_fscache_release_client_cookie(clp);
......@@ -196,9 +220,6 @@ static void nfs_free_client(struct nfs_client *clp)
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
nfs_callback_down();
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
......@@ -347,7 +368,8 @@ struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
/* Don't match clients that failed to initialise properly */
if (clp->cl_cons_state != NFS_CS_READY)
if (!(clp->cl_cons_state == NFS_CS_READY ||
clp->cl_cons_state == NFS_CS_SESSION_INITING))
continue;
/* Different NFS versions cannot share the same nfs_client */
......@@ -420,7 +442,9 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
if (clp->cl_proto != data->proto)
continue;
/* Match nfsv4 minorversion */
if (clp->cl_minorversion != data->minorversion)
continue;
/* Match the full socket address */
if (!nfs_sockaddr_cmp(sap, clap))
continue;
......@@ -478,7 +502,7 @@ found_client:
nfs_free_client(new);
error = wait_event_killable(nfs_client_active_wq,
clp->cl_cons_state != NFS_CS_INITING);
clp->cl_cons_state < NFS_CS_INITING);
if (error < 0) {
nfs_put_client(clp);
return ERR_PTR(-ERESTARTSYS);
......@@ -499,12 +523,28 @@ found_client:
/*
* Mark a server as ready or failed
*/
static void nfs_mark_client_ready(struct nfs_client *clp, int state)
void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
/*
* With sessions, the client is not marked ready until after a
* successful EXCHANGE_ID and CREATE_SESSION.
*
* Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
* other versions of NFS can be tried.
*/
int nfs4_check_client_ready(struct nfs_client *clp)
{
if (!nfs4_has_session(clp))
return 0;
if (clp->cl_cons_state < NFS_CS_READY)
return -EPROTONOSUPPORT;
return 0;
}
/*
* Initialise the timeout values for a connection
*/
......@@ -1049,6 +1089,61 @@ error:
}
#ifdef CONFIG_NFS_V4
/*
* Initialize the NFS4 callback service
*/
static int nfs4_init_callback(struct nfs_client *clp)
{
int error;
if (clp->rpc_ops->version == 4) {
if (nfs4_has_session(clp)) {
error = xprt_setup_backchannel(
clp->cl_rpcclient->cl_xprt,
NFS41_BC_MIN_CALLBACKS);
if (error < 0)
return error;
}
error = nfs_callback_up(clp->cl_minorversion,
clp->cl_rpcclient->cl_xprt);
if (error < 0) {
dprintk("%s: failed to start callback. Error = %d\n",
__func__, error);
return error;
}
__set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
}
return 0;
}
/*
* Initialize the minor version specific parts of an NFS4 client record
*/
static int nfs4_init_client_minor_version(struct nfs_client *clp)
{
clp->cl_call_sync = _nfs4_call_sync;
#if defined(CONFIG_NFS_V4_1)
if (clp->cl_minorversion) {
struct nfs4_session *session = NULL;
/*
* Create the session and mark it expired.
* When a SEQUENCE operation encounters the expired session,
* it will do session recovery to initialize it.
*/
session = nfs4_alloc_session(clp);
if (!session)
return -ENOMEM;
clp->cl_session = session;
clp->cl_call_sync = _nfs4_call_sync_session;
}
#endif /* CONFIG_NFS_V4_1 */
return nfs4_init_callback(clp);
}
/*
* Initialise an NFS4 client record
*/
......@@ -1083,6 +1178,11 @@ static int nfs4_init_client(struct nfs_client *clp,
}
__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
error = nfs4_init_client_minor_version(clp);
if (error < 0)
goto error;
if (!nfs4_has_session(clp))
nfs_mark_client_ready(clp, NFS_CS_READY);
return 0;
......@@ -1101,7 +1201,8 @@ static int nfs4_set_client(struct nfs_server *server,
const size_t addrlen,
const char *ip_addr,
rpc_authflavor_t authflavour,
int proto, const struct rpc_timeout *timeparms)
int proto, const struct rpc_timeout *timeparms,
u32 minorversion)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
......@@ -1109,6 +1210,7 @@ static int nfs4_set_client(struct nfs_server *server,
.addrlen = addrlen,
.rpc_ops = &nfs_v4_clientops,
.proto = proto,
.minorversion = minorversion,
};
struct nfs_client *clp;
int error;
......@@ -1137,6 +1239,36 @@ error:
return error;
}
/*
* Initialize a session.
* Note: save the mount rsize and wsize for create_server negotiation.
*/
static void nfs4_init_session(struct nfs_client *clp,
unsigned int wsize, unsigned int rsize)
{
#if defined(CONFIG_NFS_V4_1)
if (nfs4_has_session(clp)) {
clp->cl_session->fc_attrs.max_rqst_sz = wsize;
clp->cl_session->fc_attrs.max_resp_sz = rsize;
}
#endif /* CONFIG_NFS_V4_1 */
}
/*
* The session has been established and the client marked ready.
* Set the mount rsize and wsize with the negotiated fore channel
* attributes, which will be bounds-checked in nfs_server_set_fsinfo.
*/
static void nfs4_session_set_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
if (!nfs4_has_session(server->nfs_client))
return;
server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
/*
* Create a version 4 volume record
*/
......@@ -1164,7 +1296,8 @@ static int nfs4_init_server(struct nfs_server *server,
data->client_address,
data->auth_flavors[0],
data->nfs_server.protocol,
&timeparms);
&timeparms,
data->minorversion);
if (error < 0)
goto error;
......@@ -1214,6 +1347,8 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
if (error < 0)
......@@ -1224,6 +1359,8 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
(unsigned long long) server->fsid.minor);
dprintk("Mount FH: %d\n", mntfh->size);
nfs4_session_set_rwsize(server);
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
......@@ -1282,7 +1419,8 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
parent_server->client->cl_timeout);
parent_server->client->cl_timeout,
parent_client->cl_minorversion);
if (error < 0)
goto error;
......
......@@ -259,6 +259,9 @@ static void nfs_direct_read_release(void *calldata)
}
static const struct rpc_call_ops nfs_read_direct_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_read_result,
.rpc_release = nfs_direct_read_release,
};
......@@ -535,6 +538,9 @@ static void nfs_direct_commit_release(void *calldata)
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_commit_result,
.rpc_release = nfs_direct_commit_release,
};
......@@ -673,6 +679,9 @@ out_unlock:
}
static const struct rpc_call_ops nfs_write_direct_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_write_result,
.rpc_release = nfs_direct_write_release,
};
......
......@@ -2,6 +2,7 @@
* NFS internal definitions
*/
#include "nfs4_fs.h"
#include <linux/mount.h>
#include <linux/security.h>
......@@ -17,6 +18,18 @@ struct nfs_string;
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
/*
* Determine if sessions are in use.
*/
static inline int nfs4_has_session(const struct nfs_client *clp)
{
#ifdef CONFIG_NFS_V4_1
if (clp->cl_session)
return 1;
#endif /* CONFIG_NFS_V4_1 */
return 0;
}
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
......@@ -44,6 +57,7 @@ struct nfs_parsed_mount_data {
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
unsigned int minorversion;
char *fscache_uniq;
struct {
......@@ -99,6 +113,8 @@ extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern int nfs4_check_client_ready(struct nfs_client *clp);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
......@@ -146,6 +162,20 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
/* nfs4proc.c */
static inline void nfs4_restart_rpc(struct rpc_task *task,
const struct nfs_client *clp)
{
#ifdef CONFIG_NFS_V4_1
if (nfs4_has_session(clp) &&
test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
rpc_restart_call_prepare(task);
return;
}
#endif /* CONFIG_NFS_V4_1 */
rpc_restart_call(task);
}
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
......@@ -205,6 +235,38 @@ extern int nfs4_path_walk(struct nfs_server *server,
const char *path);
#endif
/* read.c */
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
/* nfs4proc.c */
extern int _nfs4_call_sync(struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
extern int _nfs4_call_sync_session(struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
#ifdef CONFIG_NFS_V4_1
extern void nfs41_sequence_free_slot(const struct nfs_client *,
struct nfs4_sequence_res *res);
#endif /* CONFIG_NFS_V4_1 */
static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
struct nfs4_sequence_res *res)
{
#ifdef CONFIG_NFS_V4_1
if (nfs4_has_session(clp))
nfs41_sequence_free_slot(clp, res);
#endif /* CONFIG_NFS_V4_1 */
}
/*
* Determine the device name as a string
*/
......
......@@ -44,6 +44,7 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
NFS4CLNT_SESSION_SETUP,
};
/*
......@@ -177,6 +178,14 @@ struct nfs4_state_recovery_ops {
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
};
struct nfs4_state_maintenance_ops {
int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
extern const struct dentry_operations nfs4_dentry_operations;
......@@ -193,6 +202,7 @@ extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struc
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
......@@ -200,8 +210,26 @@ extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops;
extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
#if defined(CONFIG_NFS_V4_1)
extern int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *, int reset);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
#else /* CONFIG_NFS_V4_1 */
static inline int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
int cache_reply, struct rpc_task *task)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
......@@ -216,7 +244,12 @@ extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
......
......@@ -59,12 +59,14 @@
void
nfs4_renew_state(struct work_struct *work)
{
struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
ops = nfs4_state_renewal_ops[clp->cl_minorversion];
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
......@@ -76,7 +78,7 @@ nfs4_renew_state(struct work_struct *work)
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
cred = nfs4_get_renew_cred_locked(clp);
cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
if (list_empty(&clp->cl_delegations)) {
......@@ -86,7 +88,7 @@ nfs4_renew_state(struct work_struct *work)
nfs_expire_all_delegations(clp);
} else {
/* Queue an asynchronous RENEW. */
nfs4_proc_async_renew(clp, cred);
ops->sched_state_renewal(clp, cred);
put_rpccred(cred);
}
timeout = (2 * lease) / 3;
......
......@@ -60,7 +60,7 @@ const nfs4_stateid zero_stateid;
static LIST_HEAD(nfs4_clientid_list);
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
unsigned short port;
int status;
......@@ -77,7 +77,7 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
return status;
}
static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
......@@ -114,17 +114,21 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
return cred;
}
static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
cred = nfs4_get_renew_cred_locked(clp);
cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
return cred;
}
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
#endif /* CONFIG_NFS_V4_1 */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
......@@ -738,11 +742,13 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
if (status == -NFS4ERR_BAD_SEQID) {
struct nfs4_state_owner *sp = container_of(seqid->sequence,
struct nfs4_state_owner, so_seqid);
struct nfs_server *server = sp->so_server;
if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
}
if (!nfs4_has_session(server->nfs_client))
nfs_increment_seqid(status, seqid);
}
......@@ -1042,6 +1048,14 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
......@@ -1075,18 +1089,22 @@ restart:
static int nfs4_check_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
struct nfs4_state_maintenance_ops *ops =
nfs4_state_renewal_ops[clp->cl_minorversion];
int status = -NFS4ERR_EXPIRED;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
return 0;
cred = nfs4_get_renew_cred(clp);
spin_lock(&clp->cl_lock);
cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
if (cred == NULL)
goto out;
}
status = nfs4_proc_renew(clp, cred);
status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
nfs4_recovery_handle_error(clp, status);
......@@ -1096,20 +1114,97 @@ out:
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
struct nfs4_state_recovery_ops *ops =
nfs4_reboot_recovery_ops[clp->cl_minorversion];
int status = -ENOENT;
cred = nfs4_get_setclientid_cred(clp);
cred = ops->get_clid_cred(clp);
if (cred != NULL) {
status = nfs4_init_client(clp, cred);
status = ops->establish_clid(clp, cred);
put_rpccred(cred);
/* Handle case where the user hasn't set up machine creds */
if (status == -EACCES && cred == clp->cl_machine_cred) {
nfs4_clear_machine_cred(clp);
status = -EAGAIN;
}
if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
status = -EPROTONOSUPPORT;
}
return status;
}
#ifdef CONFIG_NFS_V4_1
static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
{
switch (err) {
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
static int nfs4_reset_session(struct nfs_client *clp)
{
int status;
status = nfs4_proc_destroy_session(clp->cl_session);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
nfs4_session_recovery_handle_error(clp, status);
goto out;
}
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
status = nfs4_proc_create_session(clp, 1);
if (status)
nfs4_session_recovery_handle_error(clp, status);
/* fall through */
out:
/* Wake up the next rpc task even on error */
rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
return status;
}
static int nfs4_initialize_session(struct nfs_client *clp)
{
int status;
status = nfs4_proc_create_session(clp, 0);
if (!status) {
nfs_mark_client_ready(clp, NFS_CS_READY);
} else if (status == -NFS4ERR_STALE_CLIENTID) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
} else {
nfs_mark_client_ready(clp, status);
}
return status;
}
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */
/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
* on EXCHANGE_ID for v4.1
*/
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
if (nfs4_has_session(clp)) {
switch (status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_CLID_INUSE:
case -EAGAIN:
break;
case -NFS4ERR_NOT_SAME: /* FIXME: implement recovery
* in nfs4_exchange_id */
default:
return;
}
}
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}
static void nfs4_state_manager(struct nfs_client *clp)
{
......@@ -1121,9 +1216,12 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
if (status) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_set_lease_expired(clp, status);
if (status == -EAGAIN)
continue;
if (clp->cl_cons_state ==
NFS_CS_SESSION_INITING)
nfs_mark_client_ready(clp, status);
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
......@@ -1134,25 +1232,44 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (status != 0)
continue;
}
/* Initialize or reset the session */
if (nfs4_has_session(clp) &&
test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
status = nfs4_initialize_session(clp);
else
status = nfs4_reset_session(clp);
if (status) {
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
goto out_error;
}
}
/* First recover reboot state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
status = nfs4_do_reclaim(clp,
nfs4_reboot_recovery_ops[clp->cl_minorversion]);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
continue;
nfs4_state_end_reclaim_reboot(clp);
continue;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
status = nfs4_do_reclaim(clp,
nfs4_nograce_recovery_ops[clp->cl_minorversion]);
if (status < 0) {
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
if (status == -NFS4ERR_EXPIRED)
continue;
if (test_bit(NFS4CLNT_SESSION_SETUP,
&clp->cl_state))
continue;
goto out_error;
} else
nfs4_state_end_reclaim_nograce(clp);
......
......@@ -22,6 +22,7 @@
#include <asm/system.h>
#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
......@@ -46,6 +47,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
......@@ -357,19 +359,25 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
return;
goto out;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
return;
goto out;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
rpc_restart_call(task);
nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
return;
out:
nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
&data->res.seq_res);
return;
}
/*
......@@ -406,7 +414,23 @@ static void nfs_readpage_release_partial(void *calldata)
nfs_readdata_release(calldata);
}
#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
&data->args.seq_args, &data->res.seq_res,
0, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_partial,
.rpc_release = nfs_readpage_release_partial,
};
......@@ -470,6 +494,9 @@ static void nfs_readpage_release_full(void *calldata)
}
static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_full,
.rpc_release = nfs_readpage_release_full,
};
......
......@@ -90,6 +90,7 @@ enum {
Opt_mountport,
Opt_mountvers,
Opt_nfsvers,
Opt_minorversion,
/* Mount options that take string arguments */
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
......@@ -155,6 +156,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mountvers, "mountvers=%u" },
{ Opt_nfsvers, "nfsvers=%u" },
{ Opt_nfsvers, "vers=%u" },
{ Opt_minorversion, "minorversion=%u" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
......@@ -1211,6 +1213,13 @@ static int nfs_parse_mount_options(char *raw,
nfs_parse_invalid_value("nfsvers");
}
break;
case Opt_minorversion:
if (match_int(args, &option))
return 0;
if (option < 0 || option > NFS4_MAX_MINOR_VERSION)
return 0;
mnt->minorversion = option;
break;
/*
* options that take text values
......@@ -2263,6 +2272,7 @@ static int nfs4_validate_mount_data(void *options,
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->auth_flavors[0] = RPC_AUTH_UNIX;
args->auth_flavor_len = 0;
args->minorversion = 0;
switch (data->version) {
case 1:
......@@ -2477,12 +2487,13 @@ static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
dprintk("--> %s\n", __func__);
nfs_super_return_all_delegations(sb);
kill_anon_super(sb);
nfs4_renewd_prepare_shutdown(server);
nfs_fscache_release_super_cookie(sb);
nfs_free_server(server);
dprintk("<-- %s\n", __func__);
}
/*
......
......@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include "internal.h"
#include "nfs4_fs.h"
struct nfs_unlinkdata {
struct hlist_node list;
......@@ -82,7 +83,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
rpc_restart_call(task);
nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
......@@ -102,9 +103,25 @@ static void nfs_async_unlink_release(void *calldata)
nfs_sb_deactive(sb);
}
#if defined(CONFIG_NFS_V4_1)
void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_unlinkdata *data = calldata;
struct nfs_server *server = NFS_SERVER(data->dir);
if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_unlink_prepare,
#endif /* CONFIG_NFS_V4_1 */
};
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
......@@ -241,6 +258,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
status = PTR_ERR(data->cred);
goto out_free;
}
data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
status = -EBUSY;
spin_lock(&dentry->d_lock);
......
......@@ -25,6 +25,7 @@
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
......@@ -52,6 +53,7 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
......@@ -71,6 +73,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
......@@ -1048,7 +1051,23 @@ out:
nfs_writedata_release(calldata);
}
#if defined(CONFIG_NFS_V4_1)
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
if (nfs4_setup_sequence(clp, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_write_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_partial,
.rpc_release = nfs_writeback_release_partial,
};
......@@ -1111,6 +1130,9 @@ remove_request:
}
static const struct rpc_call_ops nfs_write_full_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_full,
.rpc_release = nfs_writeback_release_full,
};
......@@ -1123,6 +1145,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
struct nfs_server *server = NFS_SERVER(data->inode);
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
......@@ -1155,7 +1178,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
NFS_SERVER(data->inode)->nfs_client->cl_hostname,
server->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
......@@ -1181,7 +1204,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
rpc_restart_call(task);
nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
......@@ -1193,6 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
......@@ -1349,6 +1373,9 @@ static void nfs_commit_release(void *calldata)
}
static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
......
......@@ -21,6 +21,7 @@
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
#define NFS4_OPAQUE_LIMIT 1024
#define NFS4_MAX_SESSIONID_LEN 16
#define NFS4_ACCESS_READ 0x0001
......@@ -130,6 +131,16 @@
#define NFS4_MAX_UINT64 (~(u64)0)
/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
* If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
*/
#define NFS4_MAX_OPS 8
/* Our NFS4 client backchannel server only wants the cb_sequence and the
* actual operation per compound
*/
#define NFS4_MAX_BACK_CHANNEL_OPS 2
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
NFS4_ACL_WHO_OWNER,
......@@ -462,6 +473,13 @@ enum lock_type4 {
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
#define NFS4_MINOR_VERSION 0
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MINOR_VERSION 1
#else
#define NFS4_MAX_MINOR_VERSION 0
#endif /* CONFIG_NFS_V4_1 */
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
......
......@@ -4,11 +4,17 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
#include <asm/atomic.h>
struct nfs4_session;
struct nfs_iostats;
struct nlm_host;
struct nfs4_sequence_args;
struct nfs4_sequence_res;
struct nfs_server;
/*
* The nfs_client identifies our client state to the server.
......@@ -18,6 +24,7 @@ struct nfs_client {
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
......@@ -32,6 +39,7 @@ struct nfs_client {
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
......@@ -63,7 +71,22 @@ struct nfs_client {
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
#endif
int (* cl_call_sync)(struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_1
/* clientid returned from EXCHANGE_ID, used by session operations */
u64 cl_ex_clid;
/* The sequence id to use for the next CREATE_SESSION */
u32 cl_seqid;
/* The flags used for obtaining the clientid during EXCHANGE_ID */
u32 cl_exchange_flags;
struct nfs4_session *cl_session; /* shared session */
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
......@@ -145,4 +168,46 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
/* maximum number of slots to use */
#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
#if defined(CONFIG_NFS_V4_1)
/* Sessions */
#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
struct nfs4_slot_table {
struct nfs4_slot *slots; /* seqid per slot */
unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
spinlock_t slot_tbl_lock;
struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
int max_slots; /* # slots in table */
int highest_used_slotid; /* sent to server on each SEQ.
* op for dynamic resizing */
};
static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
{
return sp - tbl->slots;
}
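Note: as a hedged illustration of how these fields fit together (the helper below is hypothetical and not part of this merge; the real slot allocator lives elsewhere in the NFSv4.1 code), reserving a slot amounts to finding the lowest clear bit in used_slots under slot_tbl_lock and tracking highest_used_slotid:
/* Illustrative sketch only -- names are hypothetical. */
static int nfs4_reserve_slot_sketch(struct nfs4_slot_table *tbl)
{
	int slotid;

	spin_lock(&tbl->slot_tbl_lock);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots) {
		/* table full: callers would sleep on slot_tbl_waitq */
		spin_unlock(&tbl->slot_tbl_lock);
		return -EBUSY;
	}
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid)
		tbl->highest_used_slotid = slotid;
	spin_unlock(&tbl->slot_tbl_lock);
	return slotid;
}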
/*
* Session related parameters
*/
struct nfs4_session {
struct nfs4_sessionid sess_id;
u32 flags;
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
struct nfs4_slot_table fc_slot_table;
struct nfs4_channel_attrs bc_attrs;
struct nfs4_slot_table bc_slot_table;
struct nfs_client *clp;
};
#endif /* CONFIG_NFS_V4_1 */
#endif
......@@ -41,7 +41,6 @@
#include <linux/kref.h>
#include <linux/sunrpc/clnt.h>
#define NFS4_OPAQUE_LIMIT 1024
typedef struct {
u32 cl_boot;
u32 cl_id;
......
/******************************************************************************
(c) 2008 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*
* Functions to create and manage the backchannel
*/
#ifndef _LINUX_SUNRPC_BC_XPRT_H
#define _LINUX_SUNRPC_BC_XPRT_H
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/sched.h>
#ifdef CONFIG_NFS_V4_1
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
void bc_release_request(struct rpc_task *);
int bc_send(struct rpc_rqst *req);
#else /* CONFIG_NFS_V4_1 */
static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
unsigned int min_reqs)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */
#endif /* _LINUX_SUNRPC_BC_XPRT_H */
......@@ -143,6 +143,7 @@ int rpc_call_sync(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
void rpc_restart_call_prepare(struct rpc_task *);
void rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
......
......@@ -210,6 +210,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
......@@ -237,6 +239,7 @@ void rpc_show_tasks(void);
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
static inline void rpc_exit(struct rpc_task *task, int status)
{
......
......@@ -96,6 +96,15 @@ struct svc_serv {
svc_thread_fn sv_function; /* main function for threads */
unsigned int sv_drc_max_pages; /* Total pages for DRC */
unsigned int sv_drc_pages_used;/* DRC pages used */
#if defined(CONFIG_NFS_V4_1)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
spinlock_t sv_cb_lock; /* protects the sv_cb_list */
wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
* entries in the sv_cb_list */
struct svc_xprt *bc_xprt;
#endif /* CONFIG_NFS_V4_1 */
};
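Note: the consumer of these fields is the NFSv4.1 callback thread, which waits on sv_cb_waitq and drains sv_cb_list. As a hedged sketch of the producer side (hypothetical helper, not in this hunk), the transport code that receives a callback would queue the request and wake the thread:
/* Illustrative sketch only -- not part of this merge. */
static void sketch_queue_bc_request(struct svc_serv *serv,
				    struct rpc_rqst *req)
{
	spin_lock_bh(&serv->sv_cb_lock);
	list_add_tail(&req->rq_bc_list, &serv->sv_cb_list);
	spin_unlock_bh(&serv->sv_cb_lock);
	wake_up(&serv->sv_cb_waitq);
}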
/*
......@@ -411,6 +420,8 @@ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
struct svc_rqst *);
int svc_register(const struct svc_serv *, const int,
const unsigned short, const unsigned short);
......
......@@ -42,6 +42,8 @@ int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
int svc_addsock(struct svc_serv *serv, int fd, char *name_return);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
......
......@@ -67,7 +67,8 @@ struct rpc_rqst {
struct rpc_task * rq_task; /* RPC task data */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
int rq_received; /* receive completed */
int rq_reply_bytes_recvd; /* number of reply */
/* bytes received */
u32 rq_seqno; /* gss seq no. used on req. */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
......@@ -97,6 +98,12 @@ struct rpc_rqst {
unsigned long rq_xtime; /* when transmitted */
int rq_ntrans;
#if defined(CONFIG_NFS_V4_1)
struct list_head rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_NFS_V4_1 */
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
......@@ -174,6 +181,15 @@ struct rpc_xprt {
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
#if defined(CONFIG_NFS_V4_1)
struct svc_serv *bc_serv; /* The RPC service which will */
/* process the callback */
unsigned int bc_alloc_count; /* Total number of preallocs */
spinlock_t bc_pa_lock; /* Protects the preallocated
* items */
struct list_head bc_pa_list; /* List of preallocated
* backchannel rpc_rqst's */
#endif /* CONFIG_NFS_V4_1 */
struct list_head recv;
struct {
......@@ -192,6 +208,26 @@ struct rpc_xprt {
const char *address_strings[RPC_DISPLAY_MAX];
};
#if defined(CONFIG_NFS_V4_1)
/*
* Backchannel flags
*/
#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
/* buffer in use */
#endif /* CONFIG_NFS_V4_1 */
#if defined(CONFIG_NFS_V4_1)
static inline int bc_prealloc(struct rpc_rqst *req)
{
return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
}
#else
static inline int bc_prealloc(struct rpc_rqst *req)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
struct sockaddr * srcaddr; /* optional local address */
......
......@@ -13,5 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
sunrpc-$(CONFIG_PROC_FS) += stats.o
sunrpc-$(CONFIG_SYSCTL) += sysctl.o
/******************************************************************************
(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
#include <linux/tcp.h>
#include <linux/sunrpc/xprt.h>
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif
#if defined(CONFIG_NFS_V4_1)
/*
* Helper routines that track the number of preallocation elements
* on the transport.
*/
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
return xprt->bc_alloc_count > 0;
}
static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
xprt->bc_alloc_count += n;
}
static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
return xprt->bc_alloc_count -= n;
}
/*
* Free the preallocated rpc_rqst structure and the memory
* buffers hanging off of it.
*/
static void xprt_free_allocation(struct rpc_rqst *req)
{
struct xdr_buf *xbufp;
dprintk("RPC: free allocations for req= %p\n", req);
BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
xbufp = &req->rq_private_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
xbufp = &req->rq_snd_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
list_del(&req->rq_bc_pa_list);
kfree(req);
}
/*
* Preallocate up to min_reqs structures and related buffers for use
* by the backchannel. This function can be called multiple times
* when creating new sessions that use the same rpc_xprt. The
* preallocated buffers are added to the pool of resources used by
* the rpc_xprt. Any one of these resources may be used by an
* incoming callback request. It's up to the higher levels in the
* stack to enforce that the maximum number of session slots is not
* being exceeded.
*
* Some callback arguments can be large. For example, a pNFS server
* using multiple deviceids. The list can be unbounded, but the client
* has the ability to tell the server the maximum size of the callback
* requests. Each deviceID is 16 bytes, so allocate one page
* for the arguments to have enough room to receive a number of these
* deviceIDs (a 4096-byte page holds roughly 250 of them alongside the
* other arguments). The NFS client indicates to the pNFS server that its
* callback requests can be up to 4096 bytes in size.
*/
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
struct page *page_rcv = NULL, *page_snd = NULL;
struct xdr_buf *xbufp = NULL;
struct rpc_rqst *req, *tmp;
struct list_head tmp_list;
int i;
dprintk("RPC: setup backchannel transport\n");
/*
* We use a temporary list to keep track of the preallocated
* buffers. Once we're done building the list we splice it
* into the backchannel preallocation list off of the rpc_xprt
* struct. This helps minimize the amount of time the list
* lock is held on the rpc_xprt struct. It also makes cleanup
* easier in case of memory allocation errors.
*/
INIT_LIST_HEAD(&tmp_list);
for (i = 0; i < min_reqs; i++) {
/* Pre-allocate one backchannel rpc_rqst */
req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
if (req == NULL) {
printk(KERN_ERR "Failed to create bc rpc_rqst\n");
goto out_free;
}
/* Add the allocated buffer to the tmp list */
dprintk("RPC: adding req= %p\n", req);
list_add(&req->rq_bc_pa_list, &tmp_list);
req->rq_xprt = xprt;
INIT_LIST_HEAD(&req->rq_list);
INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
page_rcv = alloc_page(GFP_KERNEL);
if (page_rcv == NULL) {
printk(KERN_ERR "Failed to create bc receive xbuf\n");
goto out_free;
}
xbufp = &req->rq_rcv_buf;
xbufp->head[0].iov_base = page_address(page_rcv);
xbufp->head[0].iov_len = PAGE_SIZE;
xbufp->tail[0].iov_base = NULL;
xbufp->tail[0].iov_len = 0;
xbufp->page_len = 0;
xbufp->len = PAGE_SIZE;
xbufp->buflen = PAGE_SIZE;
/* Preallocate one XDR send buffer */
page_snd = alloc_page(GFP_KERNEL);
if (page_snd == NULL) {
printk(KERN_ERR "Failed to create bc snd xbuf\n");
goto out_free;
}
xbufp = &req->rq_snd_buf;
xbufp->head[0].iov_base = page_address(page_snd);
xbufp->head[0].iov_len = 0;
xbufp->tail[0].iov_base = NULL;
xbufp->tail[0].iov_len = 0;
xbufp->page_len = 0;
xbufp->len = 0;
xbufp->buflen = PAGE_SIZE;
}
/*
* Add the temporary list to the backchannel preallocation list
*/
spin_lock_bh(&xprt->bc_pa_lock);
list_splice(&tmp_list, &xprt->bc_pa_list);
xprt_inc_alloc_count(xprt, min_reqs);
spin_unlock_bh(&xprt->bc_pa_lock);
dprintk("RPC: setup backchannel transport done\n");
return 0;
out_free:
/*
* Memory allocation failed, free the temporary list
*/
list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
xprt_free_allocation(req);
dprintk("RPC: setup backchannel transport failed\n");
return -ENOMEM;
}
EXPORT_SYMBOL(xprt_setup_backchannel);
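Note: a minimal usage sketch, assuming the session-establishment code (elsewhere in this merge) pairs setup and teardown symmetrically and that struct rpc_clnt exposes cl_xprt directly, as it does in this tree. The helper name is hypothetical:
static int sketch_session_backchannel(struct rpc_clnt *clnt,
				      unsigned int bc_slots)
{
	int err;

	/* Preallocate one rpc_rqst per negotiated backchannel slot */
	err = xprt_setup_backchannel(clnt->cl_xprt, bc_slots);
	if (err != 0)
		return err;
	/* ... session runs; callbacks arrive on the existing connection ... */
	xprt_destroy_backchannel(clnt->cl_xprt, bc_slots);
	return 0;
}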
/*
* Destroys the backchannel preallocated structures.
* Since these structures may have been allocated by multiple calls
* to xprt_setup_backchannel, we only destroy up to the maximum number
* of reqs specified by the caller.
* @xprt: the transport holding the preallocated structures
* @max_reqs: the maximum number of preallocated structures to destroy
*/
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
struct rpc_rqst *req = NULL, *tmp = NULL;
dprintk("RPC: destroy backchannel transport\n");
BUG_ON(max_reqs == 0);
spin_lock_bh(&xprt->bc_pa_lock);
xprt_dec_alloc_count(xprt, max_reqs);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
dprintk("RPC: req=%p\n", req);
xprt_free_allocation(req);
if (--max_reqs == 0)
break;
}
spin_unlock_bh(&xprt->bc_pa_lock);
dprintk("RPC: backchannel list empty= %s\n",
list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL(xprt_destroy_backchannel);
/*
* One or more rpc_rqst structures have been preallocated during the
* backchannel setup.  Buffer space for the send and private XDR buffers
* has been preallocated as well.  Use xprt_alloc_bc_request to obtain
* one of these preallocated structures.  Use xprt_free_bc_request to
* return it.
*
* Returns an available rpc_rqst, or NULL if none are available.
*/
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
struct rpc_rqst *req;
dprintk("RPC: allocate a backchannel request\n");
spin_lock_bh(&xprt->bc_pa_lock);
if (!list_empty(&xprt->bc_pa_list)) {
req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
rq_bc_pa_list);
list_del(&req->rq_bc_pa_list);
} else {
req = NULL;
}
spin_unlock_bh(&xprt->bc_pa_lock);
if (req != NULL) {
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
req->rq_reply_bytes_recvd = 0;
req->rq_bytes_sent = 0;
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
sizeof(req->rq_private_buf));
}
dprintk("RPC: backchannel req=%p\n", req);
return req;
}
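Note: a hedged sketch of the receive side (hypothetical; the transport changes that actually call this live elsewhere in the merge). When a callback call arrives on the shared connection, the transport either claims a preallocated request or drops the call:
/* Illustrative sketch only. */
static void sketch_recv_bc_call(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = xprt_alloc_bc_request(xprt);

	if (req == NULL)
		return;	/* no preallocated slot: drop the callback */
	/* ... copy the call into req->rq_rcv_buf, then queue req on
	 * xprt->bc_serv->sv_cb_list and wake sv_cb_waitq ... */
}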
/*
* Return the preallocated rpc_rqst structure and XDR buffers
* associated with this request.
*/
void xprt_free_bc_request(struct rpc_rqst *req)
{
struct rpc_xprt *xprt = req->rq_xprt;
dprintk("RPC: free backchannel req=%p\n", req);
smp_mb__before_clear_bit();
BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
smp_mb__after_clear_bit();
if (!xprt_need_to_requeue(xprt)) {
/*
* The last remaining session was destroyed while this
* entry was in use. Free the entry and don't attempt
* to add back to the list because there is no need to
* have any more preallocated entries.
*/
dprintk("RPC: Last session removed req=%p\n", req);
xprt_free_allocation(req);
return;
}
/*
* Return it to the list of preallocations so that it
* may be reused by a new callback request.
*/
spin_lock_bh(&xprt->bc_pa_lock);
list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
spin_unlock_bh(&xprt->bc_pa_lock);
}
#endif /* CONFIG_NFS_V4_1 */
/******************************************************************************
(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*
* The NFSv4.1 callback service helper routines.
* They implement the transport level processing required to send the
* reply over an existing open connection previously established by the client.
*/
#if defined(CONFIG_NFS_V4_1)
#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCDSP
void bc_release_request(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
dprintk("RPC: bc_release_request: task= %p\n", task);
/*
* Release this request only if it's a backchannel
* preallocated request
*/
if (!bc_prealloc(req))
return;
xprt_free_bc_request(req);
}
/* Empty callback ops */
static const struct rpc_call_ops nfs41_callback_ops = {
};
/*
* Send the callback reply
*/
int bc_send(struct rpc_rqst *req)
{
struct rpc_task *task;
int ret;
dprintk("RPC: bc_send req= %p\n", req);
task = rpc_run_bc_task(req, &nfs41_callback_ops);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
BUG_ON(atomic_read(&task->tk_count) != 1);
ret = task->tk_status;
rpc_put_task(task);
}
dprintk("RPC: bc_send ret= %d\n", ret);
return ret;
}
#endif /* CONFIG_NFS_V4_1 */
......@@ -36,7 +36,9 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
......@@ -63,6 +65,9 @@ static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
......@@ -613,6 +618,50 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
}
EXPORT_SYMBOL_GPL(rpc_call_async);
#if defined(CONFIG_NFS_V4_1)
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
* @req: RPC request to run
* @tk_ops: RPC call ops
*/
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
struct xdr_buf *xbufp = &req->rq_snd_buf;
struct rpc_task_setup task_setup_data = {
.callback_ops = tk_ops,
};
dprintk("RPC: rpc_run_bc_task req= %p\n", req);
/*
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
if (!task) {
xprt_free_bc_request(req);
goto out;
}
task->tk_rqstp = req;
/*
* Set up the xdr_buf length.
* This also indicates that the buffer is XDR encoded already.
*/
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
xbufp->tail[0].iov_len;
task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count);
BUG_ON(atomic_read(&task->tk_count) != 2);
rpc_execute(task);
out:
dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
return task;
}
#endif /* CONFIG_NFS_V4_1 */
void
rpc_call_start(struct rpc_task *task)
{
......@@ -694,6 +743,19 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
* Restart an (async) RPC call from the call_prepare state.
* Usually called from within the exit handler.
*/
void
rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return;
task->tk_action = rpc_prepare_task;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
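Note: a minimal sketch of the intended use from an rpc_call_done handler (the handler below is hypothetical; the NFSv4.1 callers live in other hunks of this merge):
static void sketch_call_done(struct rpc_task *task, void *calldata)
{
	/* Retry the call from the prepare state after a recoverable error */
	if (task->tk_status == -EAGAIN) {
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
	}
}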
/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
......@@ -1085,7 +1147,7 @@ call_transmit(struct rpc_task *task)
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
if (task->tk_msg.rpc_proc->p_decode != NULL)
if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
......@@ -1120,6 +1182,72 @@ call_transmit_status(struct rpc_task *task)
}
}
#if defined(CONFIG_NFS_V4_1)
/*
* 5b. Send the backchannel RPC reply. On error, drop the reply. In
* addition, disconnect on connectivity errors.
*/
static void
call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
BUG_ON(task->tk_status != 0);
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status == -EAGAIN) {
/*
* Could not reserve the transport. Try again after the
* transport is released.
*/
task->tk_status = 0;
task->tk_action = call_bc_transmit;
return;
}
task->tk_action = rpc_exit_task;
if (task->tk_status < 0) {
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
return;
}
xprt_transmit(task);
xprt_end_transmit(task);
dprint_status(task);
switch (task->tk_status) {
case 0:
/* Success */
break;
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
* forechannel reestablish the connection. The server will
* have to retransmit the backchannel request and we'll
* reprocess it. Since these ops are idempotent, there's no
* need to cache our reply at this time.
*/
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
default:
/*
* We were unable to reply and will have to drop the
* request. The server should reconnect and retransmit.
*/
BUG_ON(task->tk_status == -EAGAIN);
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
break;
}
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */
/*
* 6. Sort out the RPC call status
*/
......@@ -1130,8 +1258,8 @@ call_status(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
int status;
if (req->rq_received > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_received;
if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
......@@ -1248,7 +1376,7 @@ call_decode(struct rpc_task *task)
/*
* Ensure that we see all writes made by xprt_complete_rqst()
* before it changed req->rq_received.
* before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
......@@ -1289,7 +1417,7 @@ out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
req->rq_received = req->rq_rcv_buf.len = 0;
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
......@@ -1377,13 +1505,14 @@ rpc_verify_header(struct rpc_task *task)
}
if ((len -= 3) < 0)
goto out_overflow;
p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
task->tk_pid, __func__, n);
goto out_garbage;
}
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
......
......@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(rpc_delay);
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
static void rpc_prepare_task(struct rpc_task *task)
void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
......
......@@ -141,12 +141,14 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
void rpc_count_iostats(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_iostats *stats = task->tk_client->cl_metrics;
struct rpc_iostats *stats;
struct rpc_iostats *op_metrics;
long rtt, execute, queue;
if (!stats || !req)
if (!task->tk_client || !task->tk_client->cl_metrics || !req)
return;
stats = task->tk_client->cl_metrics;
op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
op_metrics->om_ops++;
......@@ -154,7 +156,7 @@ void rpc_count_iostats(struct rpc_task *task)
op_metrics->om_timeouts += task->tk_timeouts;
op_metrics->om_bytes_sent += task->tk_bytes_sent;
op_metrics->om_bytes_recv += req->rq_received;
op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
queue = (long)req->rq_xtime - task->tk_start;
if (queue < 0)
......
/******************************************************************************
(c) 2008 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*
* Functions and macros used internally by RPC
*/
#ifndef _NET_SUNRPC_SUNRPC_H
#define _NET_SUNRPC_SUNRPC_H
static inline int rpc_reply_expected(struct rpc_task *task)
{
return (task->tk_msg.rpc_proc != NULL) &&
(task->tk_msg.rpc_proc->p_decode != NULL);
}
#endif /* _NET_SUNRPC_SUNRPC_H */