Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (118 commits)
  NFSv4: Iterate through all nfs_clients when the server recalls a delegation
  NFSv4: Deal more correctly with duplicate delegations
  NFS: Fix a potential race between umount and nfs_access_cache_shrinker()
  NFS: Add an asynchronous delegreturn operation for use in nfs_clear_inode
  nfs: convert NFS_*(inode) helpers to static inline
  nfs: obliterate NFS_FLAGS macro
  NFS: Address memory leaks in the NFS client mount option parser
  nfs4: allow nfsv4 acls on non-regular-files
  NFS: Optimise away the sigmask code in aio/dio reads and writes
  SUNRPC: Don't bother changing the sigmask for asynchronous RPC calls
  SUNRPC: rpcb_getport_sync() passes incorrect address size to rpc_create()
  SUNRPC: Clean up block comment preceding rpcb_getport_sync()
  SUNRPC: Use appropriate argument types in rpcb client
  SUNRPC: rpcb_getport_sync() should use built-in hostname generator
  SUNRPC: Clean up functions that free address_strings array
  NFS: NFS version number is unsigned
  NLM: Fix a bogus 'return' in nlmclnt_rpc_release
  NLM: Introduce an arguments structure for nlmclnt_init()
  NLM/NFS: Use cached nlm_host when calling nlmclnt_proc()
  NFS: Invoke nlmclnt_init during NFS mount processing
  ...
commit 85004cc367
55 changed files with 1920 additions and 1303 deletions
@@ -41,6 +41,48 @@ struct nlm_wait {
static LIST_HEAD(nlm_blocked);

/**
 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 *
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.
 */
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
    struct nlm_host *host;
    u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
    int status;

    status = lockd_up(nlm_init->protocol);
    if (status < 0)
        return ERR_PTR(status);

    host = nlmclnt_lookup_host((struct sockaddr_in *)nlm_init->address,
                   nlm_init->protocol, nlm_version,
                   nlm_init->hostname,
                   strlen(nlm_init->hostname));
    if (host == NULL) {
        lockd_down();
        return ERR_PTR(-ENOLCK);
    }

    return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);

/**
 * nlmclnt_done - Release resources allocated by nlmclnt_init()
 * @host: nlm_host structure reserved by nlmclnt_init()
 *
 */
void nlmclnt_done(struct nlm_host *host)
{
    nlm_release_host(host);
    lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
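Note: the nlmclnt_init()/nlmclnt_done() pair added above replaces the open-coded lockd_up() + nlmclnt_lookup_host() sequence on the NFS side (see the nfs_start_lockd() and nfs_destroy_server() hunks in fs/nfs/client.c below). A minimal caller-side sketch, with a hypothetical helper name and illustrative field values that are not taken from the patch:

    /* Sketch: how mount-time code might use the new API. */
    static struct nlm_host *example_start_lockd(struct sockaddr *addr,
                            size_t addrlen, const char *hostname)
    {
        struct nlmclnt_initdata nlm_init = {
            .hostname    = hostname,
            .address     = addr,
            .addrlen     = addrlen,
            .protocol    = IPPROTO_TCP,    /* or IPPROTO_UDP for UDP mounts */
            .nfs_version = 3,              /* 2 selects NLM v1, anything else v4 */
        };

        return nlmclnt_init(&nlm_init);    /* nlm_host, or ERR_PTR on failure */
    }

The matching teardown is a single nlmclnt_done(host) at unmount time, which releases the host and calls lockd_down().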
@@ -145,34 +145,21 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req)
    BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}

/*
 * This is the main entry point for the NLM client.
/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 *
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
    struct rpc_clnt *client = NFS_CLIENT(inode);
    struct sockaddr_in addr;
    struct nfs_server *nfssrv = NFS_SERVER(inode);
    struct nlm_host *host;
    struct nlm_rqst *call;
    sigset_t oldset;
    unsigned long flags;
    int status, vers;

    vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
    if (NFS_PROTO(inode)->version > 3) {
        printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
        return -ENOLCK;
    }

    rpc_peeraddr(client, (struct sockaddr *) &addr, sizeof(addr));
    host = nlmclnt_lookup_host(&addr, client->cl_xprt->prot, vers,
                   nfssrv->nfs_client->cl_hostname,
                   strlen(nfssrv->nfs_client->cl_hostname));
    if (host == NULL)
        return -ENOLCK;
    int status;

    nlm_get_host(host);
    call = nlm_alloc_call(host);
    if (call == NULL)
        return -ENOMEM;

@@ -219,7 +206,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
    dprintk("lockd: clnt proc returns %d\n", status);
    return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct

@@ -257,7 +244,7 @@ void nlm_release_call(struct nlm_rqst *call)

static void nlmclnt_rpc_release(void *data)
{
    return nlm_release_call(data);
    nlm_release_call(data);
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
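Note: the prototype change above means lock requests no longer look up an nlm_host per call. Per the shortlog entry "NLM/NFS: Use cached nlm_host when calling nlmclnt_proc()", the NFS side now passes the nlm_host cached at mount time (server->nlm_host, set up by nfs_start_lockd() in the fs/nfs/client.c hunks below). The actual NFS call sites are not part of this excerpt; a hypothetical sketch of the new calling convention:

    /* Sketch only: illustrative helper, not code from this patch. */
    static int example_nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
    {
        struct inode *inode = filp->f_mapping->host;

        return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
    }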
@@ -612,8 +612,7 @@ const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
 * called with BKL held.
 */
    static char buf[2*NLM_MAXCOOKIELEN+1];
    int i;
    int len = sizeof(buf);
    unsigned int i, len = sizeof(buf);
    char *p = buf;

    len--; /* allow for trailing \0 */
@@ -73,8 +73,6 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
    complete(&nfs_callback_info.started);

    for(;;) {
        char buf[RPC_MAX_ADDRBUFLEN];

        if (signalled()) {
            if (nfs_callback_info.users == 0)
                break;

@@ -92,8 +90,6 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
                __FUNCTION__, -err);
            break;
        }
        dprintk("%s: request from %s\n", __FUNCTION__,
                svc_print_addr(rqstp, buf, sizeof(buf)));
        svc_process(rqstp);
    }

@@ -168,12 +164,11 @@ void nfs_callback_down(void)

static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
    struct sockaddr_in *addr = svc_addr_in(rqstp);
    struct nfs_client *clp;
    char buf[RPC_MAX_ADDRBUFLEN];

    /* Don't talk to strangers */
    clp = nfs_find_client(addr, 4);
    clp = nfs_find_client(svc_addr(rqstp), 4);
    if (clp == NULL)
        return SVC_DROP;
@@ -38,7 +38,7 @@ struct cb_compound_hdr_res {
};

struct cb_getattrargs {
    struct sockaddr_in *addr;
    struct sockaddr *addr;
    struct nfs_fh fh;
    uint32_t bitmap[2];
};

@@ -53,7 +53,7 @@ struct cb_getattrres {
};

struct cb_recallargs {
    struct sockaddr_in *addr;
    struct sockaddr *addr;
    struct nfs_fh fh;
    nfs4_stateid stateid;
    uint32_t truncate;
@@ -12,7 +12,9 @@
#include "delegation.h"
#include "internal.h"

#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif

__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
{

@@ -20,12 +22,16 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
    struct nfs_delegation *delegation;
    struct nfs_inode *nfsi;
    struct inode *inode;

    res->bitmap[0] = res->bitmap[1] = 0;
    res->status = htonl(NFS4ERR_BADHANDLE);
    clp = nfs_find_client(args->addr, 4);
    if (clp == NULL)
        goto out;

    dprintk("NFS: GETATTR callback request from %s\n",
            rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));

    inode = nfs_delegation_find_inode(clp, &args->fh);
    if (inode == NULL)
        goto out_putclient;

@@ -65,23 +71,32 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
    clp = nfs_find_client(args->addr, 4);
    if (clp == NULL)
        goto out;
    inode = nfs_delegation_find_inode(clp, &args->fh);
    if (inode == NULL)
        goto out_putclient;
    /* Set up a helper thread to actually return the delegation */
    switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
        case 0:
            res = 0;
            break;
        case -ENOENT:
            res = htonl(NFS4ERR_BAD_STATEID);
            break;
        default:
            res = htonl(NFS4ERR_RESOURCE);
    }
    iput(inode);
out_putclient:
    nfs_put_client(clp);

    dprintk("NFS: RECALL callback request from %s\n",
            rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));

    do {
        struct nfs_client *prev = clp;

        inode = nfs_delegation_find_inode(clp, &args->fh);
        if (inode != NULL) {
            /* Set up a helper thread to actually return the delegation */
            switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
                case 0:
                    res = 0;
                    break;
                case -ENOENT:
                    if (res != 0)
                        res = htonl(NFS4ERR_BAD_STATEID);
                    break;
                default:
                    res = htonl(NFS4ERR_RESOURCE);
            }
            iput(inode);
        }
        clp = nfs_find_client_next(prev);
        nfs_put_client(prev);
    } while (clp != NULL);
out:
    dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res));
    return res;
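Note: nfs4_callback_recall() now walks every nfs_client that matches the callback's source address, since more than one nfs_client can sit behind the same server IP. A condensed sketch of the reference-counted iteration idiom, using only nfs_find_client(), nfs_find_client_next() and nfs_put_client() as introduced in the fs/nfs/client.c hunks below:

    /* Sketch of the client walk used by the recall path above. */
    struct nfs_client *clp = nfs_find_client(addr, 4);  /* takes a reference */

    while (clp != NULL) {
        struct nfs_client *prev = clp;

        /* ... look up the delegated inode on 'clp' and act on it ... */

        clp = nfs_find_client_next(prev);  /* reference on the next match */
        nfs_put_client(prev);              /* drop the one just processed */
    }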
@@ -139,7 +139,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
    if (unlikely(status != 0))
        return status;
    /* We do not like overly long tags! */
    if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) {
    if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) {
        printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
                __FUNCTION__, hdr->taglen);
        return htonl(NFS4ERR_RESOURCE);

@@ -176,7 +176,7 @@ static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr
    status = decode_fh(xdr, &args->fh);
    if (unlikely(status != 0))
        goto out;
    args->addr = svc_addr_in(rqstp);
    args->addr = svc_addr(rqstp);
    status = decode_bitmap(xdr, args->bitmap);
out:
    dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));

@@ -188,7 +188,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
    __be32 *p;
    __be32 status;

    args->addr = svc_addr_in(rqstp);
    args->addr = svc_addr(rqstp);
    status = decode_stateid(xdr, &args->stateid);
    if (unlikely(status != 0))
        goto out;
372  fs/nfs/client.c
|
@ -34,6 +34,8 @@
|
|||
#include <linux/nfs_idmap.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/inet.h>
|
||||
#include <linux/in6.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <linux/nfs_xdr.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
|
@ -93,22 +95,30 @@ struct rpc_program nfsacl_program = {
|
|||
};
|
||||
#endif /* CONFIG_NFS_V3_ACL */
|
||||
|
||||
struct nfs_client_initdata {
|
||||
const char *hostname;
|
||||
const struct sockaddr *addr;
|
||||
size_t addrlen;
|
||||
const struct nfs_rpc_ops *rpc_ops;
|
||||
int proto;
|
||||
};
|
||||
|
||||
/*
|
||||
* Allocate a shared client record
|
||||
*
|
||||
* Since these are allocated/deallocated very rarely, we don't
|
||||
* bother putting them in a slab cache...
|
||||
*/
|
||||
static struct nfs_client *nfs_alloc_client(const char *hostname,
|
||||
const struct sockaddr_in *addr,
|
||||
int nfsversion)
|
||||
static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
|
||||
{
|
||||
struct nfs_client *clp;
|
||||
|
||||
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
|
||||
goto error_0;
|
||||
|
||||
if (nfsversion == 4) {
|
||||
clp->rpc_ops = cl_init->rpc_ops;
|
||||
|
||||
if (cl_init->rpc_ops->version == 4) {
|
||||
if (nfs_callback_up() < 0)
|
||||
goto error_2;
|
||||
__set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
|
||||
|
@ -117,11 +127,11 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
|
|||
atomic_set(&clp->cl_count, 1);
|
||||
clp->cl_cons_state = NFS_CS_INITING;
|
||||
|
||||
clp->cl_nfsversion = nfsversion;
|
||||
memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
|
||||
memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen);
|
||||
clp->cl_addrlen = cl_init->addrlen;
|
||||
|
||||
if (hostname) {
|
||||
clp->cl_hostname = kstrdup(hostname, GFP_KERNEL);
|
||||
if (cl_init->hostname) {
|
||||
clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
|
||||
if (!clp->cl_hostname)
|
||||
goto error_3;
|
||||
}
|
||||
|
@ -129,6 +139,8 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
|
|||
INIT_LIST_HEAD(&clp->cl_superblocks);
|
||||
clp->cl_rpcclient = ERR_PTR(-EINVAL);
|
||||
|
||||
clp->cl_proto = cl_init->proto;
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
init_rwsem(&clp->cl_sem);
|
||||
INIT_LIST_HEAD(&clp->cl_delegations);
|
||||
|
@ -166,7 +178,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
|
|||
*/
|
||||
static void nfs_free_client(struct nfs_client *clp)
|
||||
{
|
||||
dprintk("--> nfs_free_client(%d)\n", clp->cl_nfsversion);
|
||||
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
|
||||
|
||||
nfs4_shutdown_client(clp);
|
||||
|
||||
|
@ -203,11 +215,106 @@ void nfs_put_client(struct nfs_client *clp)
|
|||
}
|
||||
}
|
||||
|
||||
static int nfs_sockaddr_match_ipaddr4(const struct sockaddr_in *sa1,
|
||||
const struct sockaddr_in *sa2)
|
||||
{
|
||||
return sa1->sin_addr.s_addr == sa2->sin_addr.s_addr;
|
||||
}
|
||||
|
||||
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr_in6 *sa1,
|
||||
const struct sockaddr_in6 *sa2)
|
||||
{
|
||||
return ipv6_addr_equal(&sa1->sin6_addr, &sa2->sin6_addr);
|
||||
}
|
||||
|
||||
static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
|
||||
const struct sockaddr *sa2)
|
||||
{
|
||||
switch (sa1->sa_family) {
|
||||
case AF_INET:
|
||||
return nfs_sockaddr_match_ipaddr4((const struct sockaddr_in *)sa1,
|
||||
(const struct sockaddr_in *)sa2);
|
||||
case AF_INET6:
|
||||
return nfs_sockaddr_match_ipaddr6((const struct sockaddr_in6 *)sa1,
|
||||
(const struct sockaddr_in6 *)sa2);
|
||||
}
|
||||
BUG();
|
||||
}
|
||||
|
||||
/*
|
||||
* Find a client by address
|
||||
* - caller must hold nfs_client_lock
|
||||
* Find a client by IP address and protocol version
|
||||
* - returns NULL if no such client
|
||||
*/
|
||||
static struct nfs_client *__nfs_find_client(const struct sockaddr_in *addr, int nfsversion, int match_port)
|
||||
struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
|
||||
{
|
||||
struct nfs_client *clp;
|
||||
|
||||
spin_lock(&nfs_client_lock);
|
||||
list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
|
||||
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
|
||||
|
||||
/* Don't match clients that failed to initialise properly */
|
||||
if (clp->cl_cons_state != NFS_CS_READY)
|
||||
continue;
|
||||
|
||||
/* Different NFS versions cannot share the same nfs_client */
|
||||
if (clp->rpc_ops->version != nfsversion)
|
||||
continue;
|
||||
|
||||
if (addr->sa_family != clap->sa_family)
|
||||
continue;
|
||||
/* Match only the IP address, not the port number */
|
||||
if (!nfs_sockaddr_match_ipaddr(addr, clap))
|
||||
continue;
|
||||
|
||||
atomic_inc(&clp->cl_count);
|
||||
spin_unlock(&nfs_client_lock);
|
||||
return clp;
|
||||
}
|
||||
spin_unlock(&nfs_client_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find a client by IP address and protocol version
|
||||
* - returns NULL if no such client
|
||||
*/
|
||||
struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
|
||||
{
|
||||
struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr;
|
||||
u32 nfsvers = clp->rpc_ops->version;
|
||||
|
||||
spin_lock(&nfs_client_lock);
|
||||
list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) {
|
||||
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
|
||||
|
||||
/* Don't match clients that failed to initialise properly */
|
||||
if (clp->cl_cons_state != NFS_CS_READY)
|
||||
continue;
|
||||
|
||||
/* Different NFS versions cannot share the same nfs_client */
|
||||
if (clp->rpc_ops->version != nfsvers)
|
||||
continue;
|
||||
|
||||
if (sap->sa_family != clap->sa_family)
|
||||
continue;
|
||||
/* Match only the IP address, not the port number */
|
||||
if (!nfs_sockaddr_match_ipaddr(sap, clap))
|
||||
continue;
|
||||
|
||||
atomic_inc(&clp->cl_count);
|
||||
spin_unlock(&nfs_client_lock);
|
||||
return clp;
|
||||
}
|
||||
spin_unlock(&nfs_client_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find an nfs_client on the list that matches the initialisation data
|
||||
* that is supplied.
|
||||
*/
|
||||
static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *data)
|
||||
{
|
||||
struct nfs_client *clp;
|
||||
|
||||
|
@ -217,62 +324,39 @@ static struct nfs_client *__nfs_find_client(const struct sockaddr_in *addr, int
|
|||
continue;
|
||||
|
||||
/* Different NFS versions cannot share the same nfs_client */
|
||||
if (clp->cl_nfsversion != nfsversion)
|
||||
if (clp->rpc_ops != data->rpc_ops)
|
||||
continue;
|
||||
|
||||
if (memcmp(&clp->cl_addr.sin_addr, &addr->sin_addr,
|
||||
sizeof(clp->cl_addr.sin_addr)) != 0)
|
||||
if (clp->cl_proto != data->proto)
|
||||
continue;
|
||||
|
||||
if (!match_port || clp->cl_addr.sin_port == addr->sin_port)
|
||||
goto found;
|
||||
/* Match the full socket address */
|
||||
if (memcmp(&clp->cl_addr, data->addr, sizeof(clp->cl_addr)) != 0)
|
||||
continue;
|
||||
|
||||
atomic_inc(&clp->cl_count);
|
||||
return clp;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
||||
found:
|
||||
atomic_inc(&clp->cl_count);
|
||||
return clp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find a client by IP address and protocol version
|
||||
* - returns NULL if no such client
|
||||
*/
|
||||
struct nfs_client *nfs_find_client(const struct sockaddr_in *addr, int nfsversion)
|
||||
{
|
||||
struct nfs_client *clp;
|
||||
|
||||
spin_lock(&nfs_client_lock);
|
||||
clp = __nfs_find_client(addr, nfsversion, 0);
|
||||
spin_unlock(&nfs_client_lock);
|
||||
if (clp != NULL && clp->cl_cons_state != NFS_CS_READY) {
|
||||
nfs_put_client(clp);
|
||||
clp = NULL;
|
||||
}
|
||||
return clp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Look up a client by IP address and protocol version
|
||||
* - creates a new record if one doesn't yet exist
|
||||
*/
|
||||
static struct nfs_client *nfs_get_client(const char *hostname,
|
||||
const struct sockaddr_in *addr,
|
||||
int nfsversion)
|
||||
static struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
|
||||
{
|
||||
struct nfs_client *clp, *new = NULL;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs_get_client(%s,"NIPQUAD_FMT":%d,%d)\n",
|
||||
hostname ?: "", NIPQUAD(addr->sin_addr),
|
||||
addr->sin_port, nfsversion);
|
||||
dprintk("--> nfs_get_client(%s,v%u)\n",
|
||||
cl_init->hostname ?: "", cl_init->rpc_ops->version);
|
||||
|
||||
/* see if the client already exists */
|
||||
do {
|
||||
spin_lock(&nfs_client_lock);
|
||||
|
||||
clp = __nfs_find_client(addr, nfsversion, 1);
|
||||
clp = nfs_match_client(cl_init);
|
||||
if (clp)
|
||||
goto found_client;
|
||||
if (new)
|
||||
|
@ -280,7 +364,7 @@ static struct nfs_client *nfs_get_client(const char *hostname,
|
|||
|
||||
spin_unlock(&nfs_client_lock);
|
||||
|
||||
new = nfs_alloc_client(hostname, addr, nfsversion);
|
||||
new = nfs_alloc_client(cl_init);
|
||||
} while (new);
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -344,12 +428,16 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
|
|||
switch (proto) {
|
||||
case XPRT_TRANSPORT_TCP:
|
||||
case XPRT_TRANSPORT_RDMA:
|
||||
if (!to->to_initval)
|
||||
if (to->to_initval == 0)
|
||||
to->to_initval = 60 * HZ;
|
||||
if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
|
||||
to->to_initval = NFS_MAX_TCP_TIMEOUT;
|
||||
to->to_increment = to->to_initval;
|
||||
to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
|
||||
if (to->to_maxval > NFS_MAX_TCP_TIMEOUT)
|
||||
to->to_maxval = NFS_MAX_TCP_TIMEOUT;
|
||||
if (to->to_maxval < to->to_initval)
|
||||
to->to_maxval = to->to_initval;
|
||||
to->to_exponential = 0;
|
||||
break;
|
||||
case XPRT_TRANSPORT_UDP:
|
||||
|
@ -367,19 +455,17 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
|
|||
/*
|
||||
* Create an RPC client handle
|
||||
*/
|
||||
static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
|
||||
unsigned int timeo,
|
||||
unsigned int retrans,
|
||||
rpc_authflavor_t flavor,
|
||||
int flags)
|
||||
static int nfs_create_rpc_client(struct nfs_client *clp,
|
||||
const struct rpc_timeout *timeparms,
|
||||
rpc_authflavor_t flavor,
|
||||
int flags)
|
||||
{
|
||||
struct rpc_timeout timeparms;
|
||||
struct rpc_clnt *clnt = NULL;
|
||||
struct rpc_create_args args = {
|
||||
.protocol = proto,
|
||||
.protocol = clp->cl_proto,
|
||||
.address = (struct sockaddr *)&clp->cl_addr,
|
||||
.addrsize = sizeof(clp->cl_addr),
|
||||
.timeout = &timeparms,
|
||||
.addrsize = clp->cl_addrlen,
|
||||
.timeout = timeparms,
|
||||
.servername = clp->cl_hostname,
|
||||
.program = &nfs_program,
|
||||
.version = clp->rpc_ops->version,
|
||||
|
@ -390,10 +476,6 @@ static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
|
|||
if (!IS_ERR(clp->cl_rpcclient))
|
||||
return 0;
|
||||
|
||||
nfs_init_timeout_values(&timeparms, proto, timeo, retrans);
|
||||
clp->retrans_timeo = timeparms.to_initval;
|
||||
clp->retrans_count = timeparms.to_retries;
|
||||
|
||||
clnt = rpc_create(&args);
|
||||
if (IS_ERR(clnt)) {
|
||||
dprintk("%s: cannot create RPC client. Error = %ld\n",
|
||||
|
@ -411,7 +493,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
|
|||
static void nfs_destroy_server(struct nfs_server *server)
|
||||
{
|
||||
if (!(server->flags & NFS_MOUNT_NONLM))
|
||||
lockd_down(); /* release rpc.lockd */
|
||||
nlmclnt_done(server->nlm_host);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -419,20 +501,29 @@ static void nfs_destroy_server(struct nfs_server *server)
|
|||
*/
|
||||
static int nfs_start_lockd(struct nfs_server *server)
|
||||
{
|
||||
int error = 0;
|
||||
struct nlm_host *host;
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
struct nlmclnt_initdata nlm_init = {
|
||||
.hostname = clp->cl_hostname,
|
||||
.address = (struct sockaddr *)&clp->cl_addr,
|
||||
.addrlen = clp->cl_addrlen,
|
||||
.protocol = server->flags & NFS_MOUNT_TCP ?
|
||||
IPPROTO_TCP : IPPROTO_UDP,
|
||||
.nfs_version = clp->rpc_ops->version,
|
||||
};
|
||||
|
||||
if (server->nfs_client->cl_nfsversion > 3)
|
||||
goto out;
|
||||
if (nlm_init.nfs_version > 3)
|
||||
return 0;
|
||||
if (server->flags & NFS_MOUNT_NONLM)
|
||||
goto out;
|
||||
error = lockd_up((server->flags & NFS_MOUNT_TCP) ?
|
||||
IPPROTO_TCP : IPPROTO_UDP);
|
||||
if (error < 0)
|
||||
server->flags |= NFS_MOUNT_NONLM;
|
||||
else
|
||||
server->destroy = nfs_destroy_server;
|
||||
out:
|
||||
return error;
|
||||
return 0;
|
||||
|
||||
host = nlmclnt_init(&nlm_init);
|
||||
if (IS_ERR(host))
|
||||
return PTR_ERR(host);
|
||||
|
||||
server->nlm_host = host;
|
||||
server->destroy = nfs_destroy_server;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -441,7 +532,7 @@ static int nfs_start_lockd(struct nfs_server *server)
|
|||
#ifdef CONFIG_NFS_V3_ACL
|
||||
static void nfs_init_server_aclclient(struct nfs_server *server)
|
||||
{
|
||||
if (server->nfs_client->cl_nfsversion != 3)
|
||||
if (server->nfs_client->rpc_ops->version != 3)
|
||||
goto out_noacl;
|
||||
if (server->flags & NFS_MOUNT_NOACL)
|
||||
goto out_noacl;
|
||||
|
@ -468,7 +559,9 @@ static inline void nfs_init_server_aclclient(struct nfs_server *server)
|
|||
/*
|
||||
* Create a general RPC client
|
||||
*/
|
||||
static int nfs_init_server_rpcclient(struct nfs_server *server, rpc_authflavor_t pseudoflavour)
|
||||
static int nfs_init_server_rpcclient(struct nfs_server *server,
|
||||
const struct rpc_timeout *timeo,
|
||||
rpc_authflavor_t pseudoflavour)
|
||||
{
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
|
||||
|
@ -478,6 +571,11 @@ static int nfs_init_server_rpcclient(struct nfs_server *server, rpc_authflavor_t
|
|||
return PTR_ERR(server->client);
|
||||
}
|
||||
|
||||
memcpy(&server->client->cl_timeout_default,
|
||||
timeo,
|
||||
sizeof(server->client->cl_timeout_default));
|
||||
server->client->cl_timeout = &server->client->cl_timeout_default;
|
||||
|
||||
if (pseudoflavour != clp->cl_rpcclient->cl_auth->au_flavor) {
|
||||
struct rpc_auth *auth;
|
||||
|
||||
|
@ -502,6 +600,7 @@ static int nfs_init_server_rpcclient(struct nfs_server *server, rpc_authflavor_t
|
|||
* Initialise an NFS2 or NFS3 client
|
||||
*/
|
||||
static int nfs_init_client(struct nfs_client *clp,
|
||||
const struct rpc_timeout *timeparms,
|
||||
const struct nfs_parsed_mount_data *data)
|
||||
{
|
||||
int error;
|
||||
|
@ -512,18 +611,11 @@ static int nfs_init_client(struct nfs_client *clp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Check NFS protocol revision and initialize RPC op vector */
|
||||
clp->rpc_ops = &nfs_v2_clientops;
|
||||
#ifdef CONFIG_NFS_V3
|
||||
if (clp->cl_nfsversion == 3)
|
||||
clp->rpc_ops = &nfs_v3_clientops;
|
||||
#endif
|
||||
/*
|
||||
* Create a client RPC handle for doing FSSTAT with UNIX auth only
|
||||
* - RFC 2623, sec 2.3.2
|
||||
*/
|
||||
error = nfs_create_rpc_client(clp, data->nfs_server.protocol,
|
||||
data->timeo, data->retrans, RPC_AUTH_UNIX, 0);
|
||||
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
nfs_mark_client_ready(clp, NFS_CS_READY);
|
||||
|
@ -541,25 +633,34 @@ static int nfs_init_client(struct nfs_client *clp,
|
|||
static int nfs_init_server(struct nfs_server *server,
|
||||
const struct nfs_parsed_mount_data *data)
|
||||
{
|
||||
struct nfs_client_initdata cl_init = {
|
||||
.hostname = data->nfs_server.hostname,
|
||||
.addr = (const struct sockaddr *)&data->nfs_server.address,
|
||||
.addrlen = data->nfs_server.addrlen,
|
||||
.rpc_ops = &nfs_v2_clientops,
|
||||
.proto = data->nfs_server.protocol,
|
||||
};
|
||||
struct rpc_timeout timeparms;
|
||||
struct nfs_client *clp;
|
||||
int error, nfsvers = 2;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs_init_server()\n");
|
||||
|
||||
#ifdef CONFIG_NFS_V3
|
||||
if (data->flags & NFS_MOUNT_VER3)
|
||||
nfsvers = 3;
|
||||
cl_init.rpc_ops = &nfs_v3_clientops;
|
||||
#endif
|
||||
|
||||
/* Allocate or find a client reference we can use */
|
||||
clp = nfs_get_client(data->nfs_server.hostname,
|
||||
&data->nfs_server.address, nfsvers);
|
||||
clp = nfs_get_client(&cl_init);
|
||||
if (IS_ERR(clp)) {
|
||||
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
|
||||
return PTR_ERR(clp);
|
||||
}
|
||||
|
||||
error = nfs_init_client(clp, data);
|
||||
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
|
||||
data->timeo, data->retrans);
|
||||
error = nfs_init_client(clp, &timeparms, data);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -583,7 +684,7 @@ static int nfs_init_server(struct nfs_server *server,
|
|||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
error = nfs_init_server_rpcclient(server, data->auth_flavors[0]);
|
||||
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -729,6 +830,9 @@ static struct nfs_server *nfs_alloc_server(void)
|
|||
INIT_LIST_HEAD(&server->client_link);
|
||||
INIT_LIST_HEAD(&server->master_link);
|
||||
|
||||
init_waitqueue_head(&server->active_wq);
|
||||
atomic_set(&server->active, 0);
|
||||
|
||||
server->io_stats = nfs_alloc_iostats();
|
||||
if (!server->io_stats) {
|
||||
kfree(server);
|
||||
|
@ -840,7 +944,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
|
|||
* Initialise an NFS4 client record
|
||||
*/
|
||||
static int nfs4_init_client(struct nfs_client *clp,
|
||||
int proto, int timeo, int retrans,
|
||||
const struct rpc_timeout *timeparms,
|
||||
const char *ip_addr,
|
||||
rpc_authflavor_t authflavour)
|
||||
{
|
||||
|
@ -855,7 +959,7 @@ static int nfs4_init_client(struct nfs_client *clp,
|
|||
/* Check NFS protocol revision and initialize RPC op vector */
|
||||
clp->rpc_ops = &nfs_v4_clientops;
|
||||
|
||||
error = nfs_create_rpc_client(clp, proto, timeo, retrans, authflavour,
|
||||
error = nfs_create_rpc_client(clp, timeparms, authflavour,
|
||||
RPC_CLNT_CREATE_DISCRTRY);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
@ -882,23 +986,32 @@ static int nfs4_init_client(struct nfs_client *clp,
|
|||
* Set up an NFS4 client
|
||||
*/
|
||||
static int nfs4_set_client(struct nfs_server *server,
|
||||
const char *hostname, const struct sockaddr_in *addr,
|
||||
const char *hostname,
|
||||
const struct sockaddr *addr,
|
||||
const size_t addrlen,
|
||||
const char *ip_addr,
|
||||
rpc_authflavor_t authflavour,
|
||||
int proto, int timeo, int retrans)
|
||||
int proto, const struct rpc_timeout *timeparms)
|
||||
{
|
||||
struct nfs_client_initdata cl_init = {
|
||||
.hostname = hostname,
|
||||
.addr = addr,
|
||||
.addrlen = addrlen,
|
||||
.rpc_ops = &nfs_v4_clientops,
|
||||
.proto = proto,
|
||||
};
|
||||
struct nfs_client *clp;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs4_set_client()\n");
|
||||
|
||||
/* Allocate or find a client reference we can use */
|
||||
clp = nfs_get_client(hostname, addr, 4);
|
||||
clp = nfs_get_client(&cl_init);
|
||||
if (IS_ERR(clp)) {
|
||||
error = PTR_ERR(clp);
|
||||
goto error;
|
||||
}
|
||||
error = nfs4_init_client(clp, proto, timeo, retrans, ip_addr, authflavour);
|
||||
error = nfs4_init_client(clp, timeparms, ip_addr, authflavour);
|
||||
if (error < 0)
|
||||
goto error_put;
|
||||
|
||||
|
@ -919,10 +1032,26 @@ static int nfs4_set_client(struct nfs_server *server,
|
|||
static int nfs4_init_server(struct nfs_server *server,
|
||||
const struct nfs_parsed_mount_data *data)
|
||||
{
|
||||
struct rpc_timeout timeparms;
|
||||
int error;
|
||||
|
||||
dprintk("--> nfs4_init_server()\n");
|
||||
|
||||
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
|
||||
data->timeo, data->retrans);
|
||||
|
||||
/* Get a client record */
|
||||
error = nfs4_set_client(server,
|
||||
data->nfs_server.hostname,
|
||||
(const struct sockaddr *)&data->nfs_server.address,
|
||||
data->nfs_server.addrlen,
|
||||
data->client_address,
|
||||
data->auth_flavors[0],
|
||||
data->nfs_server.protocol,
|
||||
&timeparms);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
/* Initialise the client representation from the mount data */
|
||||
server->flags = data->flags & NFS_MOUNT_FLAGMASK;
|
||||
server->caps |= NFS_CAP_ATOMIC_OPEN;
|
||||
|
@ -937,8 +1066,9 @@ static int nfs4_init_server(struct nfs_server *server,
|
|||
server->acdirmin = data->acdirmin * HZ;
|
||||
server->acdirmax = data->acdirmax * HZ;
|
||||
|
||||
error = nfs_init_server_rpcclient(server, data->auth_flavors[0]);
|
||||
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
|
||||
|
||||
error:
|
||||
/* Done */
|
||||
dprintk("<-- nfs4_init_server() = %d\n", error);
|
||||
return error;
|
||||
|
@ -961,17 +1091,6 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
|
|||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Get a client record */
|
||||
error = nfs4_set_client(server,
|
||||
data->nfs_server.hostname,
|
||||
&data->nfs_server.address,
|
||||
data->client_address,
|
||||
data->auth_flavors[0],
|
||||
data->nfs_server.protocol,
|
||||
data->timeo, data->retrans);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
/* set up the general RPC client */
|
||||
error = nfs4_init_server(server, data);
|
||||
if (error < 0)
|
||||
|
@ -1039,12 +1158,13 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
|
||||
/* Get a client representation.
|
||||
* Note: NFSv4 always uses TCP, */
|
||||
error = nfs4_set_client(server, data->hostname, data->addr,
|
||||
parent_client->cl_ipaddr,
|
||||
data->authflavor,
|
||||
parent_server->client->cl_xprt->prot,
|
||||
parent_client->retrans_timeo,
|
||||
parent_client->retrans_count);
|
||||
error = nfs4_set_client(server, data->hostname,
|
||||
data->addr,
|
||||
data->addrlen,
|
||||
parent_client->cl_ipaddr,
|
||||
data->authflavor,
|
||||
parent_server->client->cl_xprt->prot,
|
||||
parent_server->client->cl_timeout);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -1052,7 +1172,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
|
|||
nfs_server_copy_userdata(server, parent_server);
|
||||
server->caps |= NFS_CAP_ATOMIC_OPEN;
|
||||
|
||||
error = nfs_init_server_rpcclient(server, data->authflavor);
|
||||
error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
||||
|
@ -1121,7 +1241,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
|
|||
|
||||
server->fsid = fattr->fsid;
|
||||
|
||||
error = nfs_init_server_rpcclient(server, source->client->cl_auth->au_flavor);
|
||||
error = nfs_init_server_rpcclient(server,
|
||||
source->client->cl_timeout,
|
||||
source->client->cl_auth->au_flavor);
|
||||
if (error < 0)
|
||||
goto out_free_server;
|
||||
if (!IS_ERR(source->client_acl))
|
||||
|
@ -1263,10 +1385,10 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
|
|||
/* display one transport per line on subsequent lines */
|
||||
clp = list_entry(v, struct nfs_client, cl_share_link);
|
||||
|
||||
seq_printf(m, "v%d %02x%02x%02x%02x %4hx %3d %s\n",
|
||||
clp->cl_nfsversion,
|
||||
NIPQUAD(clp->cl_addr.sin_addr),
|
||||
ntohs(clp->cl_addr.sin_port),
|
||||
seq_printf(m, "v%u %s %s %3d %s\n",
|
||||
clp->rpc_ops->version,
|
||||
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
|
||||
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
|
||||
atomic_read(&clp->cl_count),
|
||||
clp->cl_hostname);
|
||||
|
||||
|
@ -1342,10 +1464,10 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
|
|||
(unsigned long long) server->fsid.major,
|
||||
(unsigned long long) server->fsid.minor);
|
||||
|
||||
seq_printf(m, "v%d %02x%02x%02x%02x %4hx %-7s %-17s\n",
|
||||
clp->cl_nfsversion,
|
||||
NIPQUAD(clp->cl_addr.sin_addr),
|
||||
ntohs(clp->cl_addr.sin_port),
|
||||
seq_printf(m, "v%u %s %s %-7s %-17s\n",
|
||||
clp->rpc_ops->version,
|
||||
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
|
||||
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
|
||||
dev,
|
||||
fsid);
|
||||
|
||||
|
|
|
@ -125,6 +125,32 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, st
|
|||
put_rpccred(oldcred);
|
||||
}
|
||||
|
||||
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
|
||||
{
|
||||
int res = 0;
|
||||
|
||||
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
|
||||
nfs_free_delegation(delegation);
|
||||
return res;
|
||||
}
|
||||
|
||||
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
|
||||
{
|
||||
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
|
||||
|
||||
if (delegation == NULL)
|
||||
goto nomatch;
|
||||
if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
|
||||
sizeof(delegation->stateid.data)) != 0)
|
||||
goto nomatch;
|
||||
list_del_rcu(&delegation->super_list);
|
||||
nfsi->delegation_state = 0;
|
||||
rcu_assign_pointer(nfsi->delegation, NULL);
|
||||
return delegation;
|
||||
nomatch:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up a delegation on an inode
|
||||
*/
|
||||
|
@ -133,6 +159,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
|
|||
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs_delegation *delegation;
|
||||
struct nfs_delegation *freeme = NULL;
|
||||
int status = 0;
|
||||
|
||||
delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
|
||||
|
@ -147,41 +174,45 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
|
|||
delegation->inode = inode;
|
||||
|
||||
spin_lock(&clp->cl_lock);
|
||||
if (rcu_dereference(nfsi->delegation) == NULL) {
|
||||
list_add_rcu(&delegation->super_list, &clp->cl_delegations);
|
||||
nfsi->delegation_state = delegation->type;
|
||||
rcu_assign_pointer(nfsi->delegation, delegation);
|
||||
delegation = NULL;
|
||||
} else {
|
||||
if (rcu_dereference(nfsi->delegation) != NULL) {
|
||||
if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
|
||||
sizeof(delegation->stateid)) != 0 ||
|
||||
delegation->type != nfsi->delegation->type) {
|
||||
printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
|
||||
__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
|
||||
status = -EIO;
|
||||
sizeof(delegation->stateid)) == 0 &&
|
||||
delegation->type == nfsi->delegation->type) {
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* Deal with broken servers that hand out two
|
||||
* delegations for the same file.
|
||||
*/
|
||||
dfprintk(FILE, "%s: server %s handed out "
|
||||
"a duplicate delegation!\n",
|
||||
__FUNCTION__, clp->cl_hostname);
|
||||
if (delegation->type <= nfsi->delegation->type) {
|
||||
freeme = delegation;
|
||||
delegation = NULL;
|
||||
goto out;
|
||||
}
|
||||
freeme = nfs_detach_delegation_locked(nfsi, NULL);
|
||||
}
|
||||
list_add_rcu(&delegation->super_list, &clp->cl_delegations);
|
||||
nfsi->delegation_state = delegation->type;
|
||||
rcu_assign_pointer(nfsi->delegation, delegation);
|
||||
delegation = NULL;
|
||||
|
||||
/* Ensure we revalidate the attributes and page cache! */
|
||||
spin_lock(&inode->i_lock);
|
||||
nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
out:
|
||||
spin_unlock(&clp->cl_lock);
|
||||
if (delegation != NULL)
|
||||
nfs_free_delegation(delegation);
|
||||
if (freeme != NULL)
|
||||
nfs_do_return_delegation(inode, freeme, 0);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
|
||||
{
|
||||
int res = 0;
|
||||
|
||||
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
|
||||
nfs_free_delegation(delegation);
|
||||
return res;
|
||||
}
|
||||
|
||||
/* Sync all data to disk upon delegation return */
|
||||
static void nfs_msync_inode(struct inode *inode)
|
||||
{
|
||||
|
@ -207,24 +238,28 @@ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegat
|
|||
up_read(&clp->cl_sem);
|
||||
nfs_msync_inode(inode);
|
||||
|
||||
return nfs_do_return_delegation(inode, delegation);
|
||||
return nfs_do_return_delegation(inode, delegation, 1);
|
||||
}
|
||||
|
||||
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
|
||||
/*
|
||||
* This function returns the delegation without reclaiming opens
|
||||
* or protecting against delegation reclaims.
|
||||
* It is therefore really only safe to be called from
|
||||
* nfs4_clear_inode()
|
||||
*/
|
||||
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
|
||||
{
|
||||
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
|
||||
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs_delegation *delegation;
|
||||
|
||||
if (delegation == NULL)
|
||||
goto nomatch;
|
||||
if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
|
||||
sizeof(delegation->stateid.data)) != 0)
|
||||
goto nomatch;
|
||||
list_del_rcu(&delegation->super_list);
|
||||
nfsi->delegation_state = 0;
|
||||
rcu_assign_pointer(nfsi->delegation, NULL);
|
||||
return delegation;
|
||||
nomatch:
|
||||
return NULL;
|
||||
if (rcu_dereference(nfsi->delegation) != NULL) {
|
||||
spin_lock(&clp->cl_lock);
|
||||
delegation = nfs_detach_delegation_locked(nfsi, NULL);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
if (delegation != NULL)
|
||||
nfs_do_return_delegation(inode, delegation, 0);
|
||||
}
|
||||
}
|
||||
|
||||
int nfs_inode_return_delegation(struct inode *inode)
|
||||
|
@ -314,8 +349,9 @@ void nfs_expire_all_delegations(struct nfs_client *clp)
|
|||
__module_get(THIS_MODULE);
|
||||
atomic_inc(&clp->cl_count);
|
||||
task = kthread_run(nfs_do_expire_all_delegations, clp,
|
||||
"%u.%u.%u.%u-delegreturn",
|
||||
NIPQUAD(clp->cl_addr.sin_addr));
|
||||
"%s-delegreturn",
|
||||
rpc_peeraddr2str(clp->cl_rpcclient,
|
||||
RPC_DISPLAY_ADDR));
|
||||
if (!IS_ERR(task))
|
||||
return;
|
||||
nfs_put_client(clp);
|
||||
|
@ -386,7 +422,7 @@ static int recall_thread(void *data)
|
|||
nfs_msync_inode(inode);
|
||||
|
||||
if (delegation != NULL)
|
||||
nfs_do_return_delegation(inode, delegation);
|
||||
nfs_do_return_delegation(inode, delegation, 1);
|
||||
iput(inode);
|
||||
module_put_and_exit(0);
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
|
|||
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
|
||||
int nfs_inode_return_delegation(struct inode *inode);
|
||||
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
|
||||
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
|
||||
|
||||
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
|
||||
void nfs_return_all_delegations(struct super_block *sb);
|
||||
|
@ -39,7 +40,7 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp);
|
|||
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
|
||||
|
||||
/* NFSv4 delegation-related procedures */
|
||||
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
|
||||
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
|
||||
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
|
||||
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
|
||||
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
|
||||
|
|
65  fs/nfs/dir.c
|
@ -192,7 +192,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
|
|||
/* We requested READDIRPLUS, but the server doesn't grok it */
|
||||
if (error == -ENOTSUPP && desc->plus) {
|
||||
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
|
||||
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
|
||||
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
|
||||
desc->plus = 0;
|
||||
goto again;
|
||||
}
|
||||
|
@ -537,12 +537,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
|
||||
lock_kernel();
|
||||
|
||||
res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
|
||||
if (res < 0) {
|
||||
unlock_kernel();
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* filp->f_pos points to the dirent entry number.
|
||||
* *desc->dir_cookie has the cookie for the next entry. We have
|
||||
|
@ -564,6 +558,10 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
desc->entry = &my_entry;
|
||||
|
||||
nfs_block_sillyrename(dentry);
|
||||
res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
|
||||
if (res < 0)
|
||||
goto out;
|
||||
|
||||
while(!desc->entry->eof) {
|
||||
res = readdir_search_pagecache(desc);
|
||||
|
||||
|
@ -579,7 +577,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
break;
|
||||
}
|
||||
if (res == -ETOOSMALL && desc->plus) {
|
||||
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
|
||||
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
|
||||
nfs_zap_caches(inode);
|
||||
desc->plus = 0;
|
||||
desc->entry->eof = 0;
|
||||
|
@ -594,6 +592,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
nfs_unblock_sillyrename(dentry);
|
||||
unlock_kernel();
|
||||
if (res > 0)
|
||||
|
@ -639,6 +638,21 @@ static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_force_lookup_revalidate - Mark the directory as having changed
|
||||
* @dir - pointer to directory inode
|
||||
*
|
||||
* This forces the revalidation code in nfs_lookup_revalidate() to do a
|
||||
* full lookup on all child dentries of 'dir' whenever a change occurs
|
||||
* on the server that might have invalidated our dcache.
|
||||
*
|
||||
* The caller should be holding dir->i_lock
|
||||
*/
|
||||
void nfs_force_lookup_revalidate(struct inode *dir)
|
||||
{
|
||||
NFS_I(dir)->cache_change_attribute = jiffies;
|
||||
}
|
||||
|
||||
/*
|
||||
* A check for whether or not the parent directory has changed.
|
||||
* In the case it has, we assume that the dentries are untrustworthy
|
||||
|
@ -827,6 +841,10 @@ static int nfs_dentry_delete(struct dentry *dentry)
|
|||
dentry->d_parent->d_name.name, dentry->d_name.name,
|
||||
dentry->d_flags);
|
||||
|
||||
/* Unhash any dentry with a stale inode */
|
||||
if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
|
||||
return 1;
|
||||
|
||||
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
|
||||
/* Unhash it, so that ->d_iput() would be called */
|
||||
return 1;
|
||||
|
@ -846,7 +864,6 @@ static int nfs_dentry_delete(struct dentry *dentry)
|
|||
*/
|
||||
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
|
||||
{
|
||||
nfs_inode_return_delegation(inode);
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
/* drop any readdir cache as it could easily be old */
|
||||
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
|
||||
|
@ -1268,6 +1285,12 @@ static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
|
|||
return error;
|
||||
}
|
||||
|
||||
static void nfs_dentry_handle_enoent(struct dentry *dentry)
|
||||
{
|
||||
if (dentry->d_inode != NULL && !d_unhashed(dentry))
|
||||
d_delete(dentry);
|
||||
}
|
||||
|
||||
static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
|
||||
{
|
||||
int error;
|
||||
|
@ -1280,6 +1303,8 @@ static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
|
|||
/* Ensure the VFS deletes this inode */
|
||||
if (error == 0 && dentry->d_inode != NULL)
|
||||
clear_nlink(dentry->d_inode);
|
||||
else if (error == -ENOENT)
|
||||
nfs_dentry_handle_enoent(dentry);
|
||||
unlock_kernel();
|
||||
|
||||
return error;
|
||||
|
@ -1386,6 +1411,8 @@ static int nfs_safe_remove(struct dentry *dentry)
|
|||
nfs_mark_for_revalidate(inode);
|
||||
} else
|
||||
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
|
||||
if (error == -ENOENT)
|
||||
nfs_dentry_handle_enoent(dentry);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
@ -1422,7 +1449,7 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
|
|||
spin_unlock(&dentry->d_lock);
|
||||
spin_unlock(&dcache_lock);
|
||||
error = nfs_safe_remove(dentry);
|
||||
if (!error) {
|
||||
if (!error || error == -ENOENT) {
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
} else if (need_rehash)
|
||||
d_rehash(dentry);
|
||||
|
@ -1635,7 +1662,8 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|||
d_move(old_dentry, new_dentry);
|
||||
nfs_set_verifier(new_dentry,
|
||||
nfs_save_change_attribute(new_dir));
|
||||
}
|
||||
} else if (error == -ENOENT)
|
||||
nfs_dentry_handle_enoent(old_dentry);
|
||||
|
||||
/* new dentry created? */
|
||||
if (dentry)
|
||||
|
@ -1666,13 +1694,19 @@ int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
|
|||
restart:
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
|
||||
struct rw_semaphore *s_umount;
|
||||
struct inode *inode;
|
||||
|
||||
if (nr_to_scan-- == 0)
|
||||
break;
|
||||
inode = igrab(&nfsi->vfs_inode);
|
||||
if (inode == NULL)
|
||||
s_umount = &nfsi->vfs_inode.i_sb->s_umount;
|
||||
if (!down_read_trylock(s_umount))
|
||||
continue;
|
||||
inode = igrab(&nfsi->vfs_inode);
|
||||
if (inode == NULL) {
|
||||
up_read(s_umount);
|
||||
continue;
|
||||
}
|
||||
spin_lock(&inode->i_lock);
|
||||
if (list_empty(&nfsi->access_cache_entry_lru))
|
||||
goto remove_lru_entry;
|
||||
|
@ -1691,6 +1725,7 @@ int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
|
|||
spin_unlock(&inode->i_lock);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
iput(inode);
|
||||
up_read(s_umount);
|
||||
goto restart;
|
||||
}
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
|
@ -1731,7 +1766,7 @@ static void __nfs_access_zap_cache(struct inode *inode)
|
|||
void nfs_access_zap_cache(struct inode *inode)
|
||||
{
|
||||
/* Remove from global LRU init */
|
||||
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
|
||||
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
|
@ -1845,7 +1880,7 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
|
|||
smp_mb__after_atomic_inc();
|
||||
|
||||
/* Add inode to global LRU list */
|
||||
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
|
||||
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
spin_lock(&nfs_access_lru_lock);
|
||||
list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
|
||||
spin_unlock(&nfs_access_lru_lock);
|
||||
|
|
124  fs/nfs/direct.c
|
@ -188,12 +188,17 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
|
|||
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
|
||||
{
|
||||
ssize_t result = -EIOCBQUEUED;
|
||||
struct rpc_clnt *clnt;
|
||||
sigset_t oldset;
|
||||
|
||||
/* Async requests don't wait here */
|
||||
if (dreq->iocb)
|
||||
goto out;
|
||||
|
||||
clnt = NFS_CLIENT(dreq->inode);
|
||||
rpc_clnt_sigmask(clnt, &oldset);
|
||||
result = wait_for_completion_interruptible(&dreq->completion);
|
||||
rpc_clnt_sigunmask(clnt, &oldset);
|
||||
|
||||
if (!result)
|
||||
result = dreq->error;
|
||||
|
@ -272,6 +277,16 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
|
|||
unsigned long user_addr = (unsigned long)iov->iov_base;
|
||||
size_t count = iov->iov_len;
|
||||
size_t rsize = NFS_SERVER(inode)->rsize;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_cred = ctx->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = NFS_CLIENT(inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs_read_direct_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
unsigned int pgbase;
|
||||
int result;
|
||||
ssize_t started = 0;
|
||||
|
@ -311,7 +326,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
|
|||
|
||||
data->req = (struct nfs_page *) dreq;
|
||||
data->inode = inode;
|
||||
data->cred = ctx->cred;
|
||||
data->cred = msg.rpc_cred;
|
||||
data->args.fh = NFS_FH(inode);
|
||||
data->args.context = ctx;
|
||||
data->args.offset = pos;
|
||||
|
@ -321,14 +336,16 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
|
|||
data->res.fattr = &data->fattr;
|
||||
data->res.eof = 0;
|
||||
data->res.count = bytes;
|
||||
msg.rpc_argp = &data->args;
|
||||
msg.rpc_resp = &data->res;
|
||||
|
||||
rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
|
||||
&nfs_read_direct_ops, data);
|
||||
NFS_PROTO(inode)->read_setup(data);
|
||||
task_setup_data.task = &data->task;
|
||||
task_setup_data.callback_data = data;
|
||||
NFS_PROTO(inode)->read_setup(data, &msg);
|
||||
|
||||
data->task.tk_cookie = (unsigned long) inode;
|
||||
|
||||
rpc_execute(&data->task);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
|
||||
dprintk("NFS: %5u initiated direct read call "
|
||||
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
|
||||
|
@ -391,9 +408,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
|
|||
unsigned long nr_segs, loff_t pos)
|
||||
{
|
||||
ssize_t result = 0;
|
||||
sigset_t oldset;
|
||||
struct inode *inode = iocb->ki_filp->f_mapping->host;
|
||||
struct rpc_clnt *clnt = NFS_CLIENT(inode);
|
||||
struct nfs_direct_req *dreq;
|
||||
|
||||
dreq = nfs_direct_req_alloc();
|
||||
|
@ -405,11 +420,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
|
|||
if (!is_sync_kiocb(iocb))
|
||||
dreq->iocb = iocb;
|
||||
|
||||
rpc_clnt_sigmask(clnt, &oldset);
|
||||
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
|
||||
if (!result)
|
||||
result = nfs_direct_wait(dreq);
|
||||
rpc_clnt_sigunmask(clnt, &oldset);
|
||||
nfs_direct_req_release(dreq);
|
||||
|
||||
return result;
|
||||
|
@ -431,6 +444,15 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
|
|||
struct inode *inode = dreq->inode;
|
||||
struct list_head *p;
|
||||
struct nfs_write_data *data;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_cred = dreq->ctx->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = NFS_CLIENT(inode),
|
||||
.callback_ops = &nfs_write_direct_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
|
||||
dreq->count = 0;
|
||||
get_dreq(dreq);
|
||||
|
@ -440,6 +462,9 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
|
|||
|
||||
get_dreq(dreq);
|
||||
|
||||
/* Use stable writes */
|
||||
data->args.stable = NFS_FILE_SYNC;
|
||||
|
||||
/*
|
||||
* Reset data->res.
|
||||
*/
|
||||
|
@ -451,17 +476,18 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
|
|||
* Reuse data->task; data->args should not have changed
|
||||
* since the original request was sent.
|
||||
*/
|
||||
rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
|
||||
&nfs_write_direct_ops, data);
|
||||
NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
|
||||
|
||||
data->task.tk_priority = RPC_PRIORITY_NORMAL;
|
||||
data->task.tk_cookie = (unsigned long) inode;
|
||||
task_setup_data.task = &data->task;
|
||||
task_setup_data.callback_data = data;
|
||||
msg.rpc_argp = &data->args;
|
||||
msg.rpc_resp = &data->res;
|
||||
NFS_PROTO(inode)->write_setup(data, &msg);
|
||||
|
||||
/*
|
||||
* We're called via an RPC callback, so BKL is already held.
|
||||
*/
|
||||
rpc_execute(&data->task);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
|
||||
dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
|
||||
data->task.tk_pid,
|
||||
|
@ -504,9 +530,23 @@ static const struct rpc_call_ops nfs_commit_direct_ops = {
|
|||
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
|
||||
{
|
||||
struct nfs_write_data *data = dreq->commit_data;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = dreq->ctx->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.task = &data->task,
|
||||
.rpc_client = NFS_CLIENT(dreq->inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs_commit_direct_ops,
|
||||
.callback_data = data,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
|
||||
data->inode = dreq->inode;
|
||||
data->cred = dreq->ctx->cred;
|
||||
data->cred = msg.rpc_cred;
|
||||
|
||||
data->args.fh = NFS_FH(data->inode);
|
||||
data->args.offset = 0;
|
||||
|
@@ -515,18 +555,16 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;

rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
&nfs_commit_direct_ops, data);
NFS_PROTO(data->inode)->commit_setup(data, 0);
NFS_PROTO(data->inode)->commit_setup(data, &msg);

data->task.tk_priority = RPC_PRIORITY_NORMAL;
data->task.tk_cookie = (unsigned long)data->inode;
/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
dreq->commit_data = NULL;

dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
@@ -641,6 +679,16 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
struct inode *inode = ctx->path.dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_write_direct_ops,
.flags = RPC_TASK_ASYNC,
};
size_t wsize = NFS_SERVER(inode)->wsize;
unsigned int pgbase;
int result;
@@ -683,25 +731,27 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,

data->req = (struct nfs_page *) dreq;
data->inode = inode;
data->cred = ctx->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
data->args.count = bytes;
data->args.stable = sync;
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;

rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
&nfs_write_direct_ops, data);
NFS_PROTO(inode)->write_setup(data, sync);
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
NFS_PROTO(inode)->write_setup(data, &msg);

data->task.tk_priority = RPC_PRIORITY_NORMAL;
data->task.tk_cookie = (unsigned long) inode;

rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);

dprintk("NFS: %5u initiated direct write call "
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
@@ -767,12 +817,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
size_t count)
{
ssize_t result = 0;
sigset_t oldset;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct rpc_clnt *clnt = NFS_CLIENT(inode);
struct nfs_direct_req *dreq;
size_t wsize = NFS_SERVER(inode)->wsize;
int sync = 0;
int sync = NFS_UNSTABLE;

dreq = nfs_direct_req_alloc();
if (!dreq)
@@ -780,18 +828,16 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
nfs_alloc_commit_data(dreq);

if (dreq->commit_data == NULL || count < wsize)
sync = FLUSH_STABLE;
sync = NFS_FILE_SYNC;

dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;

rpc_clnt_sigmask(clnt, &oldset);
result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
nfs_direct_req_release(dreq);

return result;
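The hunks above all make the same mechanical change: instead of open-coding rpc_init_task(), rpc_call_setup() and rpc_execute(), the caller now fills a struct rpc_message and a struct rpc_task_setup and hands both to rpc_run_task(). A minimal sketch of the new calling shape follows; it only uses the SUNRPC interfaces shown in the diff, and the function name my_dispatch() and its parameters are illustrative placeholders, not part of the patch (assumes <linux/sunrpc/clnt.h> and <linux/sunrpc/sched.h>).

	static int my_dispatch(struct rpc_clnt *clnt, struct rpc_message *msg,
			       const struct rpc_call_ops *ops, void *calldata)
	{
		struct rpc_task_setup task_setup_data = {
			.rpc_client = clnt,
			.rpc_message = msg,
			.callback_ops = ops,
			.callback_data = calldata,
			.flags = RPC_TASK_ASYNC,
		};
		struct rpc_task *task;

		/* replaces the old rpc_init_task() + rpc_call_setup() + rpc_execute() sequence */
		task = rpc_run_task(&task_setup_data);
		if (IS_ERR(task))
			return PTR_ERR(task);
		/* drop our reference; the ->rpc_release() callback in ops owns calldata */
		rpc_put_task(task);
		return 0;
	}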
@@ -349,7 +349,9 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);

return status < 0 ? status : copied;
if (status < 0)
return status;
return copied;
}

static void nfs_invalidate_page(struct page *page, unsigned long offset)
@@ -392,35 +394,27 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
struct file *filp = vma->vm_file;
unsigned pagelen;
int ret = -EINVAL;
void *fsdata;
struct address_space *mapping;
loff_t offset;

lock_page(page);
mapping = page->mapping;
if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
unlock_page(page);
return -EINVAL;
}
if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping)
goto out_unlock;

ret = 0;
pagelen = nfs_page_length(page);
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
if (pagelen == 0)
goto out_unlock;

ret = nfs_flush_incompatible(filp, page);
if (ret != 0)
goto out_unlock;

ret = nfs_updatepage(filp, page, 0, pagelen);
if (ret == 0)
ret = pagelen;
out_unlock:
unlock_page(page);

/*
* we can use mapping after releasing the page lock, because:
* we hold mmap_sem on the fault path, which should pin the vma
* which should pin the file, which pins the dentry which should
* hold a reference on inode.
*/

if (pagelen) {
struct page *page2 = NULL;
ret = nfs_write_begin(filp, mapping, offset, pagelen,
0, &page2, &fsdata);
if (!ret)
ret = nfs_write_end(filp, mapping, offset, pagelen,
pagelen, page2, fsdata);
}
return ret;
}
@ -72,39 +72,39 @@ module_param_call(idmap_cache_timeout, param_set_idmap_timeout, param_get_int,
|
|||
&nfs_idmap_cache_timeout, 0644);
|
||||
|
||||
struct idmap_hashent {
|
||||
unsigned long ih_expires;
|
||||
__u32 ih_id;
|
||||
int ih_namelen;
|
||||
char ih_name[IDMAP_NAMESZ];
|
||||
unsigned long ih_expires;
|
||||
__u32 ih_id;
|
||||
size_t ih_namelen;
|
||||
char ih_name[IDMAP_NAMESZ];
|
||||
};
|
||||
|
||||
struct idmap_hashtable {
|
||||
__u8 h_type;
|
||||
struct idmap_hashent h_entries[IDMAP_HASH_SZ];
|
||||
__u8 h_type;
|
||||
struct idmap_hashent h_entries[IDMAP_HASH_SZ];
|
||||
};
|
||||
|
||||
struct idmap {
|
||||
struct dentry *idmap_dentry;
|
||||
wait_queue_head_t idmap_wq;
|
||||
struct idmap_msg idmap_im;
|
||||
struct mutex idmap_lock; /* Serializes upcalls */
|
||||
struct mutex idmap_im_lock; /* Protects the hashtable */
|
||||
struct idmap_hashtable idmap_user_hash;
|
||||
struct idmap_hashtable idmap_group_hash;
|
||||
struct dentry *idmap_dentry;
|
||||
wait_queue_head_t idmap_wq;
|
||||
struct idmap_msg idmap_im;
|
||||
struct mutex idmap_lock; /* Serializes upcalls */
|
||||
struct mutex idmap_im_lock; /* Protects the hashtable */
|
||||
struct idmap_hashtable idmap_user_hash;
|
||||
struct idmap_hashtable idmap_group_hash;
|
||||
};
|
||||
|
||||
static ssize_t idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
|
||||
char __user *, size_t);
|
||||
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
|
||||
size_t);
|
||||
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
|
||||
static ssize_t idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
|
||||
char __user *, size_t);
|
||||
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
|
||||
size_t);
|
||||
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
|
||||
|
||||
static unsigned int fnvhash32(const void *, size_t);
|
||||
|
||||
static struct rpc_pipe_ops idmap_upcall_ops = {
|
||||
.upcall = idmap_pipe_upcall,
|
||||
.downcall = idmap_pipe_downcall,
|
||||
.destroy_msg = idmap_pipe_destroy_msg,
|
||||
.upcall = idmap_pipe_upcall,
|
||||
.downcall = idmap_pipe_downcall,
|
||||
.destroy_msg = idmap_pipe_destroy_msg,
|
||||
};
|
||||
|
||||
int
|
||||
|
@ -115,19 +115,20 @@ nfs_idmap_new(struct nfs_client *clp)
|
|||
|
||||
BUG_ON(clp->cl_idmap != NULL);
|
||||
|
||||
if ((idmap = kzalloc(sizeof(*idmap), GFP_KERNEL)) == NULL)
|
||||
return -ENOMEM;
|
||||
idmap = kzalloc(sizeof(*idmap), GFP_KERNEL);
|
||||
if (idmap == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap",
|
||||
idmap, &idmap_upcall_ops, 0);
|
||||
if (IS_ERR(idmap->idmap_dentry)) {
|
||||
idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap",
|
||||
idmap, &idmap_upcall_ops, 0);
|
||||
if (IS_ERR(idmap->idmap_dentry)) {
|
||||
error = PTR_ERR(idmap->idmap_dentry);
|
||||
kfree(idmap);
|
||||
return error;
|
||||
}
|
||||
|
||||
mutex_init(&idmap->idmap_lock);
|
||||
mutex_init(&idmap->idmap_im_lock);
|
||||
mutex_init(&idmap->idmap_lock);
|
||||
mutex_init(&idmap->idmap_im_lock);
|
||||
init_waitqueue_head(&idmap->idmap_wq);
|
||||
idmap->idmap_user_hash.h_type = IDMAP_TYPE_USER;
|
||||
idmap->idmap_group_hash.h_type = IDMAP_TYPE_GROUP;
|
||||
|
@ -192,7 +193,7 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
|
|||
* pretty trivial.
|
||||
*/
|
||||
static inline struct idmap_hashent *
|
||||
idmap_alloc_name(struct idmap_hashtable *h, char *name, unsigned len)
|
||||
idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
|
||||
{
|
||||
return idmap_name_hash(h, name, len);
|
||||
}
|
||||
|
@ -285,7 +286,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
|
|||
memset(im, 0, sizeof(*im));
|
||||
mutex_unlock(&idmap->idmap_im_lock);
|
||||
mutex_unlock(&idmap->idmap_lock);
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -354,42 +355,40 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
/* RPC pipefs upcall/downcall routines */
static ssize_t
idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len - msg->copied;
ssize_t left;
char *data = (char *)msg->data + msg->copied;
size_t mlen = min(msg->len, buflen);
unsigned long left;

if (mlen > buflen)
mlen = buflen;

left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
return left;
left = copy_to_user(dst, data, mlen);
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}

mlen -= left;
msg->copied += mlen;
msg->errno = 0;
return mlen;
return mlen;
}

static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
struct idmap *idmap = (struct idmap *)rpci->private;
struct idmap_msg im_in, *im = &idmap->idmap_im;
struct idmap_hashtable *h;
struct idmap_hashent *he = NULL;
int namelen_in;
size_t namelen_in;
int ret;

if (mlen != sizeof(im_in))
return (-ENOSPC);
if (mlen != sizeof(im_in))
return -ENOSPC;

if (copy_from_user(&im_in, src, mlen) != 0)
return (-EFAULT);
if (copy_from_user(&im_in, src, mlen) != 0)
return -EFAULT;

mutex_lock(&idmap->idmap_im_lock);
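One fix worth calling out in the hunk above: copy_to_user() returns the number of bytes it could not copy, as an unsigned value, so the old "if (left < 0)" test could never fire. A short sketch of the corrected pattern follows, under the assumption of a caller that just wants to copy as much of a buffer as fits; the reply_to_user() name and its arguments are illustrative only.

	static ssize_t reply_to_user(char __user *dst, const char *src,
				     size_t len, size_t buflen)
	{
		size_t mlen = min(len, buflen);
		unsigned long left;

		left = copy_to_user(dst, src, mlen);	/* bytes NOT copied, never negative */
		if (left == mlen)
			return -EFAULT;			/* nothing reached user space at all */
		return mlen - left;			/* report how much was actually copied */
	}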
@ -487,7 +486,7 @@ static unsigned int fnvhash32(const void *buf, size_t buflen)
|
|||
hash ^= (unsigned int)*p;
|
||||
}
|
||||
|
||||
return (hash);
|
||||
return hash;
|
||||
}
|
||||
|
||||
int nfs_map_name_to_uid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
|
||||
|
|
|
@ -192,7 +192,7 @@ void nfs_invalidate_atime(struct inode *inode)
|
|||
*/
|
||||
static void nfs_invalidate_inode(struct inode *inode)
|
||||
{
|
||||
set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
|
||||
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
|
||||
nfs_zap_caches_locked(inode);
|
||||
}
|
||||
|
||||
|
@ -229,7 +229,7 @@ nfs_init_locked(struct inode *inode, void *opaque)
|
|||
struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
|
||||
struct nfs_fattr *fattr = desc->fattr;
|
||||
|
||||
NFS_FILEID(inode) = fattr->fileid;
|
||||
set_nfs_fileid(inode, fattr->fileid);
|
||||
nfs_copy_fh(NFS_FH(inode), desc->fh);
|
||||
return 0;
|
||||
}
|
||||
|
@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
|
|||
inode->i_fop = &nfs_dir_operations;
|
||||
if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
|
||||
&& fattr->size <= NFS_LIMIT_READDIRPLUS)
|
||||
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
|
||||
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
|
||||
/* Deal with crossing mountpoints */
|
||||
if (!nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
|
||||
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
|
||||
|
@ -461,9 +461,18 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
|
|||
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
|
||||
int err;
|
||||
|
||||
/* Flush out writes to the server in order to update c/mtime */
|
||||
if (S_ISREG(inode->i_mode))
|
||||
/*
|
||||
* Flush out writes to the server in order to update c/mtime.
|
||||
*
|
||||
* Hold the i_mutex to suspend application writes temporarily;
|
||||
* this prevents long-running writing applications from blocking
|
||||
* nfs_wb_nocommit.
|
||||
*/
|
||||
if (S_ISREG(inode->i_mode)) {
|
||||
mutex_lock(&inode->i_mutex);
|
||||
nfs_wb_nocommit(inode);
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* We may force a getattr if the user cares about atime.
|
||||
|
@ -659,7 +668,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
|
|||
if (status == -ESTALE) {
|
||||
nfs_zap_caches(inode);
|
||||
if (!S_ISDIR(inode->i_mode))
|
||||
set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
|
||||
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
@ -814,8 +823,9 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|||
if (S_ISDIR(inode->i_mode))
|
||||
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
|
||||
}
|
||||
if (inode->i_size == fattr->pre_size && nfsi->npages == 0)
|
||||
inode->i_size = fattr->size;
|
||||
if (inode->i_size == nfs_size_to_loff_t(fattr->pre_size) &&
|
||||
nfsi->npages == 0)
|
||||
inode->i_size = nfs_size_to_loff_t(fattr->size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1019,7 +1029,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|||
dprintk("NFS: mtime change on server for file %s/%ld\n",
|
||||
inode->i_sb->s_id, inode->i_ino);
|
||||
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
|
||||
nfsi->cache_change_attribute = now;
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
nfs_force_lookup_revalidate(inode);
|
||||
}
|
||||
/* If ctime has changed we should definitely clear access+acl caches */
|
||||
if (!timespec_equal(&inode->i_ctime, &fattr->ctime))
|
||||
|
@ -1028,7 +1039,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|||
dprintk("NFS: change_attr change on server for file %s/%ld\n",
|
||||
inode->i_sb->s_id, inode->i_ino);
|
||||
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
|
||||
nfsi->cache_change_attribute = now;
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
nfs_force_lookup_revalidate(inode);
|
||||
}
|
||||
|
||||
/* Check if our cached file size is stale */
|
||||
|
@ -1133,7 +1145,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|||
void nfs4_clear_inode(struct inode *inode)
|
||||
{
|
||||
/* If we are holding a delegation, return it! */
|
||||
nfs_inode_return_delegation(inode);
|
||||
nfs_inode_return_delegation_noreclaim(inode);
|
||||
/* First call standard NFS clear_inode() code */
|
||||
nfs_clear_inode(inode);
|
||||
}
|
||||
|
|
|
@ -21,7 +21,8 @@ struct nfs_clone_mount {
|
|||
struct nfs_fattr *fattr;
|
||||
char *hostname;
|
||||
char *mnt_path;
|
||||
struct sockaddr_in *addr;
|
||||
struct sockaddr *addr;
|
||||
size_t addrlen;
|
||||
rpc_authflavor_t authflavor;
|
||||
};
|
||||
|
||||
|
@ -41,19 +42,19 @@ struct nfs_parsed_mount_data {
|
|||
char *client_address;
|
||||
|
||||
struct {
|
||||
struct sockaddr_in address;
|
||||
struct sockaddr_storage address;
|
||||
size_t addrlen;
|
||||
char *hostname;
|
||||
unsigned int program;
|
||||
unsigned int version;
|
||||
unsigned short port;
|
||||
int protocol;
|
||||
} mount_server;
|
||||
|
||||
struct {
|
||||
struct sockaddr_in address;
|
||||
struct sockaddr_storage address;
|
||||
size_t addrlen;
|
||||
char *hostname;
|
||||
char *export_path;
|
||||
unsigned int program;
|
||||
int protocol;
|
||||
} nfs_server;
|
||||
};
|
||||
|
@ -62,7 +63,8 @@ struct nfs_parsed_mount_data {
|
|||
extern struct rpc_program nfs_program;
|
||||
|
||||
extern void nfs_put_client(struct nfs_client *);
|
||||
extern struct nfs_client *nfs_find_client(const struct sockaddr_in *, int);
|
||||
extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
|
||||
extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
|
||||
extern struct nfs_server *nfs_create_server(
|
||||
const struct nfs_parsed_mount_data *,
|
||||
struct nfs_fh *);
|
||||
|
@ -160,6 +162,8 @@ extern struct rpc_stat nfs_rpcstat;
|
|||
|
||||
extern int __init register_nfs_fs(void);
|
||||
extern void __exit unregister_nfs_fs(void);
|
||||
extern void nfs_sb_active(struct nfs_server *server);
|
||||
extern void nfs_sb_deactive(struct nfs_server *server);
|
||||
|
||||
/* namespace.c */
|
||||
extern char *nfs_path(const char *base,
|
||||
|
|
|
@ -188,7 +188,7 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
|
|||
{
|
||||
#ifdef CONFIG_NFS_V4
|
||||
struct vfsmount *mnt = NULL;
|
||||
switch (server->nfs_client->cl_nfsversion) {
|
||||
switch (server->nfs_client->rpc_ops->version) {
|
||||
case 2:
|
||||
case 3:
|
||||
mnt = vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
|
||||
|
|
|
@ -262,7 +262,9 @@ static int
|
|||
nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
||||
{
|
||||
struct kvec *iov = req->rq_rcv_buf.head;
|
||||
int status, count, recvd, hdrlen;
|
||||
size_t hdrlen;
|
||||
u32 count, recvd;
|
||||
int status;
|
||||
|
||||
if ((status = ntohl(*p++)))
|
||||
return -nfs_stat_to_errno(status);
|
||||
|
@ -273,7 +275,7 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
|||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READ reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READ header is short. iovec will be shifted.\n");
|
||||
|
@ -283,11 +285,11 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
|||
recvd = req->rq_rcv_buf.len - hdrlen;
|
||||
if (count > recvd) {
|
||||
dprintk("NFS: server cheating in read reply: "
|
||||
"count %d > recvd %d\n", count, recvd);
|
||||
"count %u > recvd %u\n", count, recvd);
|
||||
count = recvd;
|
||||
}
|
||||
|
||||
dprintk("RPC: readres OK count %d\n", count);
|
||||
dprintk("RPC: readres OK count %u\n", count);
|
||||
if (count < res->count)
|
||||
res->count = count;
|
||||
|
||||
|
@ -423,9 +425,10 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
|
|||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
struct page **page;
|
||||
int hdrlen, recvd;
|
||||
size_t hdrlen;
|
||||
unsigned int pglen, recvd;
|
||||
u32 len;
|
||||
int status, nr;
|
||||
unsigned int len, pglen;
|
||||
__be32 *end, *entry, *kaddr;
|
||||
|
||||
if ((status = ntohl(*p++)))
|
||||
|
@ -434,7 +437,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
|
|||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READDIR reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
|
||||
|
@ -576,7 +579,8 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
|
|||
{
|
||||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
int hdrlen, len, recvd;
|
||||
size_t hdrlen;
|
||||
u32 len, recvd;
|
||||
char *kaddr;
|
||||
int status;
|
||||
|
||||
|
@ -584,14 +588,14 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
|
|||
return -nfs_stat_to_errno(status);
|
||||
/* Convert length of symlink */
|
||||
len = ntohl(*p++);
|
||||
if (len >= rcvbuf->page_len || len <= 0) {
|
||||
if (len >= rcvbuf->page_len) {
|
||||
dprintk("nfs: server returned giant symlink!\n");
|
||||
return -ENAMETOOLONG;
|
||||
}
|
||||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READLINK reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
|
||||
|
|
|
@ -732,16 +732,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs3_proc_read_setup(struct nfs_read_data *data)
|
||||
static void nfs3_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_READ],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
|
||||
}
|
||||
|
||||
static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
||||
|
@ -753,24 +746,9 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs3_proc_write_setup(struct nfs_write_data *data, int how)
|
||||
static void nfs3_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_WRITE],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
data->args.stable = NFS_UNSTABLE;
|
||||
if (how & FLUSH_STABLE) {
|
||||
data->args.stable = NFS_FILE_SYNC;
|
||||
if (NFS_I(data->inode)->ncommit)
|
||||
data->args.stable = NFS_DATA_SYNC;
|
||||
}
|
||||
|
||||
/* Finalize the task. */
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE];
|
||||
}
|
||||
|
||||
static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
|
||||
|
@ -781,22 +759,17 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
|
||||
static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
|
||||
}
|
||||
|
||||
static int
|
||||
nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
|
||||
{
|
||||
return nlmclnt_proc(filp->f_path.dentry->d_inode, cmd, fl);
|
||||
struct inode *inode = filp->f_path.dentry->d_inode;
|
||||
|
||||
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
|
||||
}
|
||||
|
||||
const struct nfs_rpc_ops nfs_v3_clientops = {
|
||||
|
|
|
@ -506,9 +506,9 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
|
|||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
struct page **page;
|
||||
int hdrlen, recvd;
|
||||
size_t hdrlen;
|
||||
u32 len, recvd, pglen;
|
||||
int status, nr;
|
||||
unsigned int len, pglen;
|
||||
__be32 *entry, *end, *kaddr;
|
||||
|
||||
status = ntohl(*p++);
|
||||
|
@ -527,7 +527,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
|
|||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READDIR reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
|
||||
|
@ -549,7 +549,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
|
|||
len = ntohl(*p++); /* string length */
|
||||
p += XDR_QUADLEN(len) + 2; /* name + cookie */
|
||||
if (len > NFS3_MAXNAMLEN) {
|
||||
dprintk("NFS: giant filename in readdir (len %x)!\n",
|
||||
dprintk("NFS: giant filename in readdir (len 0x%x)!\n",
|
||||
len);
|
||||
goto err_unmap;
|
||||
}
|
||||
|
@ -570,7 +570,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
|
|||
len = ntohl(*p++);
|
||||
if (len > NFS3_FHSIZE) {
|
||||
dprintk("NFS: giant filehandle in "
|
||||
"readdir (len %x)!\n", len);
|
||||
"readdir (len 0x%x)!\n", len);
|
||||
goto err_unmap;
|
||||
}
|
||||
p += XDR_QUADLEN(len);
|
||||
|
@ -815,7 +815,8 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
|
|||
{
|
||||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
int hdrlen, len, recvd;
|
||||
size_t hdrlen;
|
||||
u32 len, recvd;
|
||||
char *kaddr;
|
||||
int status;
|
||||
|
||||
|
@ -827,7 +828,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
|
|||
|
||||
/* Convert length of symlink */
|
||||
len = ntohl(*p++);
|
||||
if (len >= rcvbuf->page_len || len <= 0) {
|
||||
if (len >= rcvbuf->page_len) {
|
||||
dprintk("nfs: server returned giant symlink!\n");
|
||||
return -ENAMETOOLONG;
|
||||
}
|
||||
|
@ -835,7 +836,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
|
|||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READLINK reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READLINK header is short. "
|
||||
|
@ -863,7 +864,9 @@ static int
|
|||
nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
||||
{
|
||||
struct kvec *iov = req->rq_rcv_buf.head;
|
||||
int status, count, ocount, recvd, hdrlen;
|
||||
size_t hdrlen;
|
||||
u32 count, ocount, recvd;
|
||||
int status;
|
||||
|
||||
status = ntohl(*p++);
|
||||
p = xdr_decode_post_op_attr(p, res->fattr);
|
||||
|
@ -871,7 +874,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
|||
if (status != 0)
|
||||
return -nfs_stat_to_errno(status);
|
||||
|
||||
/* Decode reply could and EOF flag. NFSv3 is somewhat redundant
|
||||
/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
|
||||
* in that it puts the count both in the res struct and in the
|
||||
* opaque data count. */
|
||||
count = ntohl(*p++);
|
||||
|
@ -886,7 +889,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
|||
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
|
||||
if (iov->iov_len < hdrlen) {
|
||||
dprintk("NFS: READ reply header overflowed:"
|
||||
"length %d > %Zu\n", hdrlen, iov->iov_len);
|
||||
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
|
||||
return -errno_NFSERR_IO;
|
||||
} else if (iov->iov_len != hdrlen) {
|
||||
dprintk("NFS: READ header is short. iovec will be shifted.\n");
|
||||
|
@ -896,7 +899,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
|
|||
recvd = req->rq_rcv_buf.len - hdrlen;
|
||||
if (count > recvd) {
|
||||
dprintk("NFS: server cheating in read reply: "
|
||||
"count %d > recvd %d\n", count, recvd);
|
||||
"count %u > recvd %u\n", count, recvd);
|
||||
count = recvd;
|
||||
res->eof = 0;
|
||||
}
|
||||
|
|
|
@ -114,10 +114,7 @@ static inline int valid_ipaddr4(const char *buf)
|
|||
* nfs_follow_referral - set up mountpoint when hitting a referral on moved error
|
||||
* @mnt_parent - mountpoint of parent directory
|
||||
* @dentry - parent directory
|
||||
* @fspath - fs path returned in fs_locations
|
||||
* @mntpath - mount path to new server
|
||||
* @hostname - hostname of new server
|
||||
* @addr - host addr of new server
|
||||
* @locations - array of NFSv4 server location information
|
||||
*
|
||||
*/
|
||||
static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
|
||||
|
@ -131,7 +128,8 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
|
|||
.authflavor = NFS_SB(mnt_parent->mnt_sb)->client->cl_auth->au_flavor,
|
||||
};
|
||||
char *page = NULL, *page2 = NULL;
|
||||
int loc, s, error;
|
||||
unsigned int s;
|
||||
int loc, error;
|
||||
|
||||
if (locations == NULL || locations->nlocations <= 0)
|
||||
goto out;
|
||||
|
@ -174,7 +172,10 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
|
|||
|
||||
s = 0;
|
||||
while (s < location->nservers) {
|
||||
struct sockaddr_in addr = {};
|
||||
struct sockaddr_in addr = {
|
||||
.sin_family = AF_INET,
|
||||
.sin_port = htons(NFS_PORT),
|
||||
};
|
||||
|
||||
if (location->servers[s].len <= 0 ||
|
||||
valid_ipaddr4(location->servers[s].data) < 0) {
|
||||
|
@ -183,10 +184,9 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
|
|||
}
|
||||
|
||||
mountdata.hostname = location->servers[s].data;
|
||||
addr.sin_addr.s_addr = in_aton(mountdata.hostname);
|
||||
addr.sin_family = AF_INET;
|
||||
addr.sin_port = htons(NFS_PORT);
|
||||
mountdata.addr = &addr;
|
||||
addr.sin_addr.s_addr = in_aton(mountdata.hostname),
|
||||
mountdata.addr = (struct sockaddr *)&addr;
|
||||
mountdata.addrlen = sizeof(addr);
|
||||
|
||||
snprintf(page, PAGE_SIZE, "%s:%s",
|
||||
mountdata.hostname,
|
||||
|
|
|
@ -210,7 +210,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
|
|||
spin_lock(&dir->i_lock);
|
||||
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
|
||||
if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
|
||||
nfsi->cache_change_attribute = jiffies;
|
||||
nfs_force_lookup_revalidate(dir);
|
||||
nfsi->change_attr = cinfo->after;
|
||||
spin_unlock(&dir->i_lock);
|
||||
}
|
||||
|
@ -718,19 +718,6 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
|
|||
return err;
|
||||
}
|
||||
|
||||
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs4_opendata *data = calldata;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
|
||||
.rpc_argp = &data->c_arg,
|
||||
.rpc_resp = &data->c_res,
|
||||
.rpc_cred = data->owner->so_cred,
|
||||
};
|
||||
data->timestamp = jiffies;
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
}
|
||||
|
||||
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs4_opendata *data = calldata;
|
||||
|
@ -767,7 +754,6 @@ static void nfs4_open_confirm_release(void *calldata)
|
|||
}
|
||||
|
||||
static const struct rpc_call_ops nfs4_open_confirm_ops = {
|
||||
.rpc_call_prepare = nfs4_open_confirm_prepare,
|
||||
.rpc_call_done = nfs4_open_confirm_done,
|
||||
.rpc_release = nfs4_open_confirm_release,
|
||||
};
|
||||
|
@ -779,12 +765,26 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
|
|||
{
|
||||
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
|
||||
.rpc_argp = &data->c_arg,
|
||||
.rpc_resp = &data->c_res,
|
||||
.rpc_cred = data->owner->so_cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = server->client,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_open_confirm_ops,
|
||||
.callback_data = data,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
int status;
|
||||
|
||||
kref_get(&data->kref);
|
||||
data->rpc_done = 0;
|
||||
data->rpc_status = 0;
|
||||
task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data);
|
||||
data->timestamp = jiffies;
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
status = nfs4_wait_for_completion_rpc_task(task);
|
||||
|
@ -801,13 +801,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
|
|||
{
|
||||
struct nfs4_opendata *data = calldata;
|
||||
struct nfs4_state_owner *sp = data->owner;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
|
||||
.rpc_argp = &data->o_arg,
|
||||
.rpc_resp = &data->o_res,
|
||||
.rpc_cred = sp->so_cred,
|
||||
};
|
||||
|
||||
|
||||
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
|
||||
return;
|
||||
/*
|
||||
|
@ -832,11 +826,11 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
|
|||
data->o_arg.id = sp->so_owner_id.id;
|
||||
data->o_arg.clientid = sp->so_client->cl_clientid;
|
||||
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
|
||||
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
|
||||
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
|
||||
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
|
||||
}
|
||||
data->timestamp = jiffies;
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
rpc_call_start(task);
|
||||
return;
|
||||
out_no_action:
|
||||
task->tk_action = NULL;
|
||||
|
@ -908,13 +902,26 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
|
|||
struct nfs_openargs *o_arg = &data->o_arg;
|
||||
struct nfs_openres *o_res = &data->o_res;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
|
||||
.rpc_argp = o_arg,
|
||||
.rpc_resp = o_res,
|
||||
.rpc_cred = data->owner->so_cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = server->client,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_open_ops,
|
||||
.callback_data = data,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
int status;
|
||||
|
||||
kref_get(&data->kref);
|
||||
data->rpc_done = 0;
|
||||
data->rpc_status = 0;
|
||||
data->cancelled = 0;
|
||||
task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
status = nfs4_wait_for_completion_rpc_task(task);
|
||||
|
@ -1244,12 +1251,6 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
|
|||
{
|
||||
struct nfs4_closedata *calldata = data;
|
||||
struct nfs4_state *state = calldata->state;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
|
||||
.rpc_argp = &calldata->arg,
|
||||
.rpc_resp = &calldata->res,
|
||||
.rpc_cred = state->owner->so_cred,
|
||||
};
|
||||
int clear_rd, clear_wr, clear_rdwr;
|
||||
|
||||
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
|
||||
|
@ -1276,14 +1277,14 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
|
|||
}
|
||||
nfs_fattr_init(calldata->res.fattr);
|
||||
if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
|
||||
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
calldata->arg.open_flags = FMODE_READ;
|
||||
} else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
|
||||
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
calldata->arg.open_flags = FMODE_WRITE;
|
||||
}
|
||||
calldata->timestamp = jiffies;
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
rpc_call_start(task);
|
||||
}
|
||||
|
||||
static const struct rpc_call_ops nfs4_close_ops = {
|
||||
|
@ -1309,6 +1310,16 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
struct nfs4_closedata *calldata;
|
||||
struct nfs4_state_owner *sp = state->owner;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
|
||||
.rpc_cred = state->owner->so_cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = server->client,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_close_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
int status = -ENOMEM;
|
||||
|
||||
calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
|
||||
|
@ -1328,7 +1339,10 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
calldata->path.mnt = mntget(path->mnt);
|
||||
calldata->path.dentry = dget(path->dentry);
|
||||
|
||||
task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_close_ops, calldata);
|
||||
msg.rpc_argp = &calldata->arg,
|
||||
msg.rpc_resp = &calldata->res,
|
||||
task_setup_data.callback_data = calldata;
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
status = 0;
|
||||
|
@ -2414,18 +2428,10 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs4_proc_read_setup(struct nfs_read_data *data)
|
||||
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
data->timestamp = jiffies;
|
||||
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
|
||||
}
|
||||
|
||||
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
||||
|
@ -2443,33 +2449,15 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs4_proc_write_setup(struct nfs_write_data *data, int how)
|
||||
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
struct inode *inode = data->inode;
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
int stable;
|
||||
|
||||
if (how & FLUSH_STABLE) {
|
||||
if (!NFS_I(inode)->ncommit)
|
||||
stable = NFS_FILE_SYNC;
|
||||
else
|
||||
stable = NFS_DATA_SYNC;
|
||||
} else
|
||||
stable = NFS_UNSTABLE;
|
||||
data->args.stable = stable;
|
||||
struct nfs_server *server = NFS_SERVER(data->inode);
|
||||
|
||||
data->args.bitmask = server->attr_bitmask;
|
||||
data->res.server = server;
|
||||
|
||||
data->timestamp = jiffies;
|
||||
|
||||
/* Finalize the task. */
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
|
||||
}
|
||||
|
||||
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
|
||||
|
@ -2484,20 +2472,13 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
|
||||
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
struct nfs_server *server = NFS_SERVER(data->inode);
|
||||
|
||||
data->args.bitmask = server->attr_bitmask;
|
||||
data->res.server = server;
|
||||
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2910,14 +2891,20 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
|
|||
|
||||
for(;;) {
|
||||
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
|
||||
sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u %s %u",
|
||||
clp->cl_ipaddr, NIPQUAD(clp->cl_addr.sin_addr),
|
||||
sizeof(setclientid.sc_name), "%s/%s %s %s %u",
|
||||
clp->cl_ipaddr,
|
||||
rpc_peeraddr2str(clp->cl_rpcclient,
|
||||
RPC_DISPLAY_ADDR),
|
||||
rpc_peeraddr2str(clp->cl_rpcclient,
|
||||
RPC_DISPLAY_PROTO),
|
||||
cred->cr_ops->cr_name,
|
||||
clp->cl_id_uniquifier);
|
||||
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
|
||||
sizeof(setclientid.sc_netid), "tcp");
|
||||
sizeof(setclientid.sc_netid),
|
||||
rpc_peeraddr2str(clp->cl_rpcclient,
|
||||
RPC_DISPLAY_NETID));
|
||||
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
|
||||
sizeof(setclientid.sc_uaddr), "%s.%d.%d",
|
||||
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
|
||||
clp->cl_ipaddr, port >> 8, port & 255);
|
||||
|
||||
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
|
||||
|
@ -2981,25 +2968,11 @@ struct nfs4_delegreturndata {
|
|||
struct nfs4_delegreturnres res;
|
||||
struct nfs_fh fh;
|
||||
nfs4_stateid stateid;
|
||||
struct rpc_cred *cred;
|
||||
unsigned long timestamp;
|
||||
struct nfs_fattr fattr;
|
||||
int rpc_status;
|
||||
};
|
||||
|
||||
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs4_delegreturndata *data = calldata;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
nfs_fattr_init(data->res.fattr);
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
}
|
||||
|
||||
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs4_delegreturndata *data = calldata;
|
||||
|
@ -3010,24 +2983,30 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
|
|||
|
||||
static void nfs4_delegreturn_release(void *calldata)
|
||||
{
|
||||
struct nfs4_delegreturndata *data = calldata;
|
||||
|
||||
put_rpccred(data->cred);
|
||||
kfree(calldata);
|
||||
}
|
||||
|
||||
static const struct rpc_call_ops nfs4_delegreturn_ops = {
|
||||
.rpc_call_prepare = nfs4_delegreturn_prepare,
|
||||
.rpc_call_done = nfs4_delegreturn_done,
|
||||
.rpc_release = nfs4_delegreturn_release,
|
||||
};
|
||||
|
||||
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
|
||||
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
|
||||
{
|
||||
struct nfs4_delegreturndata *data;
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
struct rpc_task *task;
|
||||
int status;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
|
||||
.rpc_cred = cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = server->client,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_delegreturn_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
int status = 0;
|
||||
|
||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (data == NULL)
|
||||
|
@ -3039,30 +3018,37 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
|
|||
memcpy(&data->stateid, stateid, sizeof(data->stateid));
|
||||
data->res.fattr = &data->fattr;
|
||||
data->res.server = server;
|
||||
data->cred = get_rpccred(cred);
|
||||
nfs_fattr_init(data->res.fattr);
|
||||
data->timestamp = jiffies;
|
||||
data->rpc_status = 0;
|
||||
|
||||
task = rpc_run_task(NFS_CLIENT(inode), RPC_TASK_ASYNC, &nfs4_delegreturn_ops, data);
|
||||
task_setup_data.callback_data = data;
|
||||
msg.rpc_argp = &data->args,
|
||||
msg.rpc_resp = &data->res,
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
if (!issync)
|
||||
goto out;
|
||||
status = nfs4_wait_for_completion_rpc_task(task);
|
||||
if (status == 0) {
|
||||
status = data->rpc_status;
|
||||
if (status == 0)
|
||||
nfs_refresh_inode(inode, &data->fattr);
|
||||
}
|
||||
if (status != 0)
|
||||
goto out;
|
||||
status = data->rpc_status;
|
||||
if (status != 0)
|
||||
goto out;
|
||||
nfs_refresh_inode(inode, &data->fattr);
|
||||
out:
|
||||
rpc_put_task(task);
|
||||
return status;
|
||||
}
|
||||
|
||||
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
|
||||
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
|
||||
{
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
struct nfs4_exception exception = { };
|
||||
int err;
|
||||
do {
|
||||
err = _nfs4_proc_delegreturn(inode, cred, stateid);
|
||||
err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
|
||||
switch (err) {
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_EXPIRED:
|
||||
|
@ -3230,12 +3216,6 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
|
|||
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
|
||||
{
|
||||
struct nfs4_unlockdata *calldata = data;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
|
||||
.rpc_argp = &calldata->arg,
|
||||
.rpc_resp = &calldata->res,
|
||||
.rpc_cred = calldata->lsp->ls_state->owner->so_cred,
|
||||
};
|
||||
|
||||
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
|
||||
return;
|
||||
|
@ -3245,7 +3225,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
|
|||
return;
|
||||
}
|
||||
calldata->timestamp = jiffies;
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
rpc_call_start(task);
|
||||
}
|
||||
|
||||
static const struct rpc_call_ops nfs4_locku_ops = {
|
||||
|
@ -3260,6 +3240,16 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
|
|||
struct nfs_seqid *seqid)
|
||||
{
|
||||
struct nfs4_unlockdata *data;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
|
||||
.rpc_cred = ctx->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_locku_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
|
||||
/* Ensure this is an unlock - when canceling a lock, the
|
||||
* canceled lock is passed in, and it won't be an unlock.
|
||||
|
@ -3272,7 +3262,10 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
|
||||
msg.rpc_argp = &data->arg,
|
||||
msg.rpc_resp = &data->res,
|
||||
task_setup_data.callback_data = data;
|
||||
return rpc_run_task(&task_setup_data);
|
||||
}
|
||||
|
||||
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
|
||||
|
@ -3331,15 +3324,12 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
|
|||
|
||||
p->arg.fh = NFS_FH(inode);
|
||||
p->arg.fl = &p->fl;
|
||||
if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) {
|
||||
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
|
||||
if (p->arg.open_seqid == NULL)
|
||||
goto out_free;
|
||||
|
||||
}
|
||||
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
|
||||
if (p->arg.open_seqid == NULL)
|
||||
goto out_free;
|
||||
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
|
||||
if (p->arg.lock_seqid == NULL)
|
||||
goto out_free;
|
||||
goto out_free_seqid;
|
||||
p->arg.lock_stateid = &lsp->ls_stateid;
|
||||
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
|
||||
p->arg.lock_owner.id = lsp->ls_id.id;
|
||||
|
@ -3348,9 +3338,9 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
|
|||
p->ctx = get_nfs_open_context(ctx);
|
||||
memcpy(&p->fl, fl, sizeof(p->fl));
|
||||
return p;
|
||||
out_free_seqid:
|
||||
nfs_free_seqid(p->arg.open_seqid);
|
||||
out_free:
|
||||
if (p->arg.open_seqid != NULL)
|
||||
nfs_free_seqid(p->arg.open_seqid);
|
||||
kfree(p);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -3359,31 +3349,20 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
|
|||
{
|
||||
struct nfs4_lockdata *data = calldata;
|
||||
struct nfs4_state *state = data->lsp->ls_state;
|
||||
struct nfs4_state_owner *sp = state->owner;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
|
||||
.rpc_argp = &data->arg,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = sp->so_cred,
|
||||
};
|
||||
|
||||
dprintk("%s: begin!\n", __FUNCTION__);
|
||||
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
|
||||
return;
|
||||
/* Do we need to do an open_to_lock_owner? */
|
||||
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
|
||||
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
|
||||
return;
|
||||
data->arg.open_stateid = &state->stateid;
|
||||
data->arg.new_lock_owner = 1;
|
||||
/* Retest in case we raced... */
|
||||
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED))
|
||||
goto do_rpc;
|
||||
}
|
||||
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
|
||||
return;
|
||||
data->arg.new_lock_owner = 0;
|
||||
do_rpc:
|
||||
} else
|
||||
data->arg.new_lock_owner = 0;
|
||||
data->timestamp = jiffies;
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
rpc_call_start(task);
|
||||
dprintk("%s: done!, ret = %d\n", __FUNCTION__, data->rpc_status);
|
||||
}
|
||||
|
||||
|
@ -3419,6 +3398,7 @@ static void nfs4_lock_release(void *calldata)
|
|||
struct nfs4_lockdata *data = calldata;
|
||||
|
||||
dprintk("%s: begin!\n", __FUNCTION__);
|
||||
nfs_free_seqid(data->arg.open_seqid);
|
||||
if (data->cancelled != 0) {
|
||||
struct rpc_task *task;
|
||||
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
|
||||
|
@ -3428,8 +3408,6 @@ static void nfs4_lock_release(void *calldata)
|
|||
dprintk("%s: cancelling lock!\n", __FUNCTION__);
|
||||
} else
|
||||
nfs_free_seqid(data->arg.lock_seqid);
|
||||
if (data->arg.open_seqid != NULL)
|
||||
nfs_free_seqid(data->arg.open_seqid);
|
||||
nfs4_put_lock_state(data->lsp);
|
||||
put_nfs_open_context(data->ctx);
|
||||
kfree(data);
|
||||
|
@ -3446,6 +3424,16 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
|
|||
{
|
||||
struct nfs4_lockdata *data;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
|
||||
.rpc_cred = state->owner->so_cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = NFS_CLIENT(state->inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs4_lock_ops,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
int ret;
|
||||
|
||||
dprintk("%s: begin!\n", __FUNCTION__);
|
||||
|
@ -3457,8 +3445,10 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
|
|||
data->arg.block = 1;
|
||||
if (reclaim != 0)
|
||||
data->arg.reclaim = 1;
|
||||
task = rpc_run_task(NFS_CLIENT(state->inode), RPC_TASK_ASYNC,
|
||||
&nfs4_lock_ops, data);
|
||||
msg.rpc_argp = &data->arg,
|
||||
msg.rpc_resp = &data->res,
|
||||
task_setup_data.callback_data = data;
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
ret = nfs4_wait_for_completion_rpc_task(task);
|
||||
|
@ -3631,10 +3621,6 @@ int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
|
|||
if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!S_ISREG(inode->i_mode) &&
|
||||
(!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
|
||||
return -EPERM;
|
||||
|
||||
return nfs4_proc_set_acl(inode, buf, buflen);
|
||||
}
|
||||
|
||||
|
|
|
@ -644,27 +644,26 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
|
|||
|
||||
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
|
||||
{
|
||||
struct rpc_sequence *sequence = counter->sequence;
|
||||
struct nfs_seqid *new;
|
||||
|
||||
new = kmalloc(sizeof(*new), GFP_KERNEL);
|
||||
if (new != NULL) {
|
||||
new->sequence = counter;
|
||||
spin_lock(&sequence->lock);
|
||||
list_add_tail(&new->list, &sequence->list);
|
||||
spin_unlock(&sequence->lock);
|
||||
INIT_LIST_HEAD(&new->list);
|
||||
}
|
||||
return new;
|
||||
}
|
||||
|
||||
void nfs_free_seqid(struct nfs_seqid *seqid)
|
||||
{
|
||||
struct rpc_sequence *sequence = seqid->sequence->sequence;
|
||||
if (!list_empty(&seqid->list)) {
|
||||
struct rpc_sequence *sequence = seqid->sequence->sequence;
|
||||
|
||||
spin_lock(&sequence->lock);
|
||||
list_del(&seqid->list);
|
||||
spin_unlock(&sequence->lock);
|
||||
rpc_wake_up(&sequence->wait);
|
||||
spin_lock(&sequence->lock);
|
||||
list_del(&seqid->list);
|
||||
spin_unlock(&sequence->lock);
|
||||
rpc_wake_up(&sequence->wait);
|
||||
}
|
||||
kfree(seqid);
|
||||
}
|
||||
|
||||
|
@ -675,6 +674,7 @@ void nfs_free_seqid(struct nfs_seqid *seqid)
|
|||
*/
|
||||
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
|
||||
{
|
||||
BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
|
||||
switch (status) {
|
||||
case 0:
|
||||
break;
|
||||
|
@@ -726,15 +726,15 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
struct rpc_sequence *sequence = seqid->sequence->sequence;
int status = 0;

if (sequence->list.next == &seqid->list)
goto out;
spin_lock(&sequence->lock);
if (sequence->list.next != &seqid->list) {
rpc_sleep_on(&sequence->wait, task, NULL, NULL);
status = -EAGAIN;
}
if (list_empty(&seqid->list))
list_add_tail(&seqid->list, &sequence->list);
if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
goto unlock;
rpc_sleep_on(&sequence->wait, task, NULL, NULL);
status = -EAGAIN;
unlock:
spin_unlock(&sequence->lock);
out:
return status;
}
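Together with the nfs_alloc_seqid()/nfs_free_seqid() hunk above, this moves the seqid onto the sequence list lazily: it is only queued once a task actually waits on it, and freeing a seqid that was never queued no longer touches the list. A generic sketch of that lazy-enqueue idiom using the standard list API follows; the struct and function names are illustrative, not taken from the patch (assumes <linux/list.h>, with locking omitted for brevity).

	struct waiter {
		struct list_head list;		/* stays empty until the waiter is queued */
	};

	static void waiter_init(struct waiter *w)
	{
		INIT_LIST_HEAD(&w->list);	/* list_empty() is true for a fresh waiter */
	}

	static void waiter_queue(struct waiter *w, struct list_head *queue)
	{
		if (list_empty(&w->list))	/* enqueue lazily, the first time only */
			list_add_tail(&w->list, queue);
	}

	static void waiter_release(struct waiter *w)
	{
		if (!list_empty(&w->list))	/* only unlink if it was ever queued */
			list_del(&w->list);
	}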
@ -758,8 +758,9 @@ static void nfs4_recover_state(struct nfs_client *clp)
|
|||
|
||||
__module_get(THIS_MODULE);
|
||||
atomic_inc(&clp->cl_count);
|
||||
task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
|
||||
NIPQUAD(clp->cl_addr.sin_addr));
|
||||
task = kthread_run(reclaimer, clp, "%s-reclaim",
|
||||
rpc_peeraddr2str(clp->cl_rpcclient,
|
||||
RPC_DISPLAY_ADDR));
|
||||
if (!IS_ERR(task))
|
||||
return;
|
||||
nfs4_clear_recover_bit(clp);
|
||||
|
@ -970,8 +971,8 @@ static int reclaimer(void *ptr)
|
|||
module_put_and_exit(0);
|
||||
return 0;
|
||||
out_error:
|
||||
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
|
||||
NIPQUAD(clp->cl_addr.sin_addr), -status);
|
||||
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
|
||||
" with error %d\n", clp->cl_hostname, -status);
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -116,10 +116,12 @@ static int nfs4_stat_to_errno(int);
|
|||
#define decode_renew_maxsz (op_decode_hdr_maxsz)
|
||||
#define encode_setclientid_maxsz \
|
||||
(op_encode_hdr_maxsz + \
|
||||
4 /*server->ip_addr*/ + \
|
||||
1 /*Netid*/ + \
|
||||
6 /*uaddr*/ + \
|
||||
6 + (NFS4_VERIFIER_SIZE >> 2))
|
||||
XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \
|
||||
XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \
|
||||
1 /* sc_prog */ + \
|
||||
XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \
|
||||
XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \
|
||||
1) /* sc_cb_ident */
|
||||
#define decode_setclientid_maxsz \
|
||||
(op_decode_hdr_maxsz + \
|
||||
2 + \
|
||||
|
@ -2515,14 +2517,12 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
|
|||
|
||||
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
|
||||
{
|
||||
int n;
|
||||
u32 n;
|
||||
__be32 *p;
|
||||
int status = 0;
|
||||
|
||||
READ_BUF(4);
|
||||
READ32(n);
|
||||
if (n < 0)
|
||||
goto out_eio;
|
||||
if (n == 0)
|
||||
goto root_path;
|
||||
dprintk("path ");
|
||||
|
@ -2579,13 +2579,11 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
|
|||
goto out_eio;
|
||||
res->nlocations = 0;
|
||||
while (res->nlocations < n) {
|
||||
int m;
|
||||
u32 m;
|
||||
struct nfs4_fs_location *loc = &res->locations[res->nlocations];
|
||||
|
||||
READ_BUF(4);
|
||||
READ32(m);
|
||||
if (m <= 0)
|
||||
goto out_eio;
|
||||
|
||||
loc->nservers = 0;
|
||||
dprintk("%s: servers ", __FUNCTION__);
|
||||
|
@ -2598,8 +2596,12 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
|
|||
if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS)
|
||||
loc->nservers++;
|
||||
else {
|
||||
int i;
|
||||
dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations);
|
||||
unsigned int i;
|
||||
dprintk("%s: using first %u of %u servers "
|
||||
"returned for location %u\n",
|
||||
__FUNCTION__,
|
||||
NFS4_FS_LOCATION_MAXSERVERS,
|
||||
m, res->nlocations);
|
||||
for (i = loc->nservers; i < m; i++) {
|
||||
unsigned int len;
|
||||
char *data;
|
||||
|
@ -3476,10 +3478,11 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
|
|||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct page *page = *rcvbuf->pages;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
unsigned int nr, pglen = rcvbuf->page_len;
|
||||
size_t hdrlen;
|
||||
u32 recvd, pglen = rcvbuf->page_len;
|
||||
__be32 *end, *entry, *p, *kaddr;
|
||||
uint32_t len, attrlen, xlen;
|
||||
int hdrlen, recvd, status;
|
||||
unsigned int nr;
|
||||
int status;
|
||||
|
||||
status = decode_op_hdr(xdr, OP_READDIR);
|
||||
if (status)
|
||||
|
@ -3503,6 +3506,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
|
|||
end = p + ((pglen + readdir->pgbase) >> 2);
|
||||
entry = p;
|
||||
for (nr = 0; *p++; nr++) {
|
||||
u32 len, attrlen, xlen;
|
||||
if (end - p < 3)
|
||||
goto short_pkt;
|
||||
dprintk("cookie = %Lu, ", *((unsigned long long *)p));
|
||||
|
@ -3551,7 +3555,8 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
|
|||
{
|
||||
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
|
||||
struct kvec *iov = rcvbuf->head;
|
||||
int hdrlen, len, recvd;
|
||||
size_t hdrlen;
|
||||
u32 len, recvd;
|
||||
__be32 *p;
|
||||
char *kaddr;
|
||||
int status;
|
||||
|
@ -3646,7 +3651,8 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
|
|||
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
|
||||
return -EIO;
|
||||
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
|
||||
int hdrlen, recvd;
|
||||
size_t hdrlen;
|
||||
u32 recvd;
|
||||
|
||||
/* We ignore &savep and don't do consistency checks on
|
||||
* the attr length. Let userspace figure it out.... */
|
||||
|
|
|
@ -111,13 +111,14 @@ void nfs_unlock_request(struct nfs_page *req)
|
|||
* nfs_set_page_tag_locked - Tag a request as locked
|
||||
* @req:
|
||||
*/
|
||||
static int nfs_set_page_tag_locked(struct nfs_page *req)
|
||||
int nfs_set_page_tag_locked(struct nfs_page *req)
|
||||
{
|
||||
struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
|
||||
|
||||
if (!nfs_lock_request(req))
|
||||
if (!nfs_lock_request_dontget(req))
|
||||
return 0;
|
||||
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
|
||||
if (req->wb_page != NULL)
|
||||
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -132,9 +133,10 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
|
|||
if (req->wb_page != NULL) {
|
||||
spin_lock(&inode->i_lock);
|
||||
radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
|
||||
nfs_unlock_request(req);
|
||||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
nfs_unlock_request(req);
|
||||
} else
|
||||
nfs_unlock_request(req);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -421,6 +423,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
|
|||
goto out;
|
||||
idx_start = req->wb_index + 1;
|
||||
if (nfs_set_page_tag_locked(req)) {
|
||||
kref_get(&req->wb_kref);
|
||||
nfs_list_remove_request(req);
|
||||
radix_tree_tag_clear(&nfsi->nfs_page_tree,
|
||||
req->wb_index, tag);
|
||||
|
|
|
@ -565,16 +565,9 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs_proc_read_setup(struct nfs_read_data *data)
|
||||
static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs_procedures[NFSPROC_READ],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs_procedures[NFSPROC_READ];
|
||||
}
|
||||
|
||||
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
||||
|
@ -584,24 +577,15 @@ static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void nfs_proc_write_setup(struct nfs_write_data *data, int how)
|
||||
static void nfs_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &nfs_procedures[NFSPROC_WRITE],
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
|
||||
data->args.stable = NFS_FILE_SYNC;
|
||||
|
||||
/* Finalize the task. */
|
||||
rpc_call_setup(&data->task, &msg, 0);
|
||||
msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE];
|
||||
}
|
||||
|
||||
static void
|
||||
nfs_proc_commit_setup(struct nfs_write_data *data, int how)
|
||||
nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
@ -609,7 +593,9 @@ nfs_proc_commit_setup(struct nfs_write_data *data, int how)
static int
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
return nlmclnt_proc(filp->f_path.dentry->d_inode, cmd, fl);
struct inode *inode = filp->f_path.dentry->d_inode;

return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}

@ -160,12 +160,26 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
|
|||
const struct rpc_call_ops *call_ops,
|
||||
unsigned int count, unsigned int offset)
|
||||
{
|
||||
struct inode *inode;
|
||||
int flags;
|
||||
struct inode *inode = req->wb_context->path.dentry->d_inode;
|
||||
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = req->wb_context->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.task = &data->task,
|
||||
.rpc_client = NFS_CLIENT(inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = call_ops,
|
||||
.callback_data = data,
|
||||
.flags = RPC_TASK_ASYNC | swap_flags,
|
||||
};
|
||||
|
||||
data->req = req;
|
||||
data->inode = inode = req->wb_context->path.dentry->d_inode;
|
||||
data->cred = req->wb_context->cred;
|
||||
data->inode = inode;
|
||||
data->cred = msg.rpc_cred;
|
||||
|
||||
data->args.fh = NFS_FH(inode);
|
||||
data->args.offset = req_offset(req) + offset;
|
||||
|
@ -180,11 +194,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
|
|||
nfs_fattr_init(&data->fattr);
|
||||
|
||||
/* Set up the initial task struct. */
|
||||
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
|
||||
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
|
||||
NFS_PROTO(inode)->read_setup(data);
|
||||
|
||||
data->task.tk_cookie = (unsigned long)inode;
|
||||
NFS_PROTO(inode)->read_setup(data, &msg);
|
||||
|
||||
dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
|
||||
data->task.tk_pid,
|
||||
|
@ -192,6 +202,10 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
|
|||
(long long)NFS_FILEID(inode),
|
||||
count,
|
||||
(unsigned long long)data->args.offset);
|
||||
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -207,19 +221,6 @@ nfs_async_read_error(struct list_head *head)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Start an async read operation
|
||||
*/
|
||||
static void nfs_execute_read(struct nfs_read_data *data)
|
||||
{
|
||||
struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
|
||||
sigset_t oldset;
|
||||
|
||||
rpc_clnt_sigmask(clnt, &oldset);
|
||||
rpc_execute(&data->task);
|
||||
rpc_clnt_sigunmask(clnt, &oldset);
|
||||
}
|
||||
|
||||
/*
|
||||
* Generate multiple requests to fill a single page.
|
||||
*
|
||||
|
@ -274,7 +275,6 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
|
|||
rsize, offset);
|
||||
offset += rsize;
|
||||
nbytes -= rsize;
|
||||
nfs_execute_read(data);
|
||||
} while (nbytes != 0);
|
||||
|
||||
return 0;
|
||||
|
@ -312,8 +312,6 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
|
|||
req = nfs_list_entry(data->pages.next);
|
||||
|
||||
nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
|
||||
|
||||
nfs_execute_read(data);
|
||||
return 0;
|
||||
out_bad:
|
||||
nfs_async_read_error(head);
|
||||
|
@ -338,7 +336,7 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
|
|||
nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
|
||||
|
||||
if (task->tk_status == -ESTALE) {
|
||||
set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
|
||||
set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
|
||||
nfs_mark_for_revalidate(data->inode);
|
||||
}
|
||||
return 0;
|
||||
|
|
fs/nfs/super.c
|
@ -45,6 +45,8 @@
|
|||
#include <linux/nfs_idmap.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/inet.h>
|
||||
#include <linux/in6.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <linux/nfs_xdr.h>
|
||||
#include <linux/magic.h>
|
||||
#include <linux/parser.h>
|
||||
|
@ -83,11 +85,11 @@ enum {
|
|||
Opt_actimeo,
|
||||
Opt_namelen,
|
||||
Opt_mountport,
|
||||
Opt_mountprog, Opt_mountvers,
|
||||
Opt_nfsprog, Opt_nfsvers,
|
||||
Opt_mountvers,
|
||||
Opt_nfsvers,
|
||||
|
||||
/* Mount options that take string arguments */
|
||||
Opt_sec, Opt_proto, Opt_mountproto,
|
||||
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
|
||||
Opt_addr, Opt_mountaddr, Opt_clientaddr,
|
||||
|
||||
/* Mount options that are ignored */
|
||||
|
@ -137,9 +139,7 @@ static match_table_t nfs_mount_option_tokens = {
|
|||
{ Opt_userspace, "retry=%u" },
|
||||
{ Opt_namelen, "namlen=%u" },
|
||||
{ Opt_mountport, "mountport=%u" },
|
||||
{ Opt_mountprog, "mountprog=%u" },
|
||||
{ Opt_mountvers, "mountvers=%u" },
|
||||
{ Opt_nfsprog, "nfsprog=%u" },
|
||||
{ Opt_nfsvers, "nfsvers=%u" },
|
||||
{ Opt_nfsvers, "vers=%u" },
|
||||
|
||||
|
@ -148,7 +148,7 @@ static match_table_t nfs_mount_option_tokens = {
|
|||
{ Opt_mountproto, "mountproto=%s" },
|
||||
{ Opt_addr, "addr=%s" },
|
||||
{ Opt_clientaddr, "clientaddr=%s" },
|
||||
{ Opt_userspace, "mounthost=%s" },
|
||||
{ Opt_mounthost, "mounthost=%s" },
|
||||
{ Opt_mountaddr, "mountaddr=%s" },
|
||||
|
||||
{ Opt_err, NULL }
|
||||
|
@ -202,6 +202,7 @@ static int nfs_get_sb(struct file_system_type *, int, const char *, void *, stru
|
|||
static int nfs_xdev_get_sb(struct file_system_type *fs_type,
|
||||
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
|
||||
static void nfs_kill_super(struct super_block *);
|
||||
static void nfs_put_super(struct super_block *);
|
||||
|
||||
static struct file_system_type nfs_fs_type = {
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -223,6 +224,7 @@ static const struct super_operations nfs_sops = {
|
|||
.alloc_inode = nfs_alloc_inode,
|
||||
.destroy_inode = nfs_destroy_inode,
|
||||
.write_inode = nfs_write_inode,
|
||||
.put_super = nfs_put_super,
|
||||
.statfs = nfs_statfs,
|
||||
.clear_inode = nfs_clear_inode,
|
||||
.umount_begin = nfs_umount_begin,
|
||||
|
@ -325,6 +327,28 @@ void __exit unregister_nfs_fs(void)
|
|||
unregister_filesystem(&nfs_fs_type);
|
||||
}
|
||||
|
||||
void nfs_sb_active(struct nfs_server *server)
|
||||
{
|
||||
atomic_inc(&server->active);
|
||||
}
|
||||
|
||||
void nfs_sb_deactive(struct nfs_server *server)
|
||||
{
|
||||
if (atomic_dec_and_test(&server->active))
|
||||
wake_up(&server->active_wq);
|
||||
}
|
||||
|
||||
static void nfs_put_super(struct super_block *sb)
|
||||
{
|
||||
struct nfs_server *server = NFS_SB(sb);
|
||||
/*
|
||||
* Make sure there are no outstanding ops to this server.
|
||||
* If so, wait for them to finish before allowing the
|
||||
* unmount to continue.
|
||||
*/
|
||||
wait_event(server->active_wq, atomic_read(&server->active) == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Deliver file system statistics to userspace
|
||||
*/
|
||||
|
@ -455,8 +479,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
|
|||
}
|
||||
seq_printf(m, ",proto=%s",
|
||||
rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO));
|
||||
seq_printf(m, ",timeo=%lu", 10U * clp->retrans_timeo / HZ);
|
||||
seq_printf(m, ",retrans=%u", clp->retrans_count);
|
||||
seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
|
||||
seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
|
||||
seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
|
||||
}
|
||||
|
||||
|
@ -469,8 +493,9 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
|
|||
|
||||
nfs_show_mount_options(m, nfss, 0);
|
||||
|
||||
seq_printf(m, ",addr="NIPQUAD_FMT,
|
||||
NIPQUAD(nfss->nfs_client->cl_addr.sin_addr));
|
||||
seq_printf(m, ",addr=%s",
|
||||
rpc_peeraddr2str(nfss->nfs_client->cl_rpcclient,
|
||||
RPC_DISPLAY_ADDR));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -507,7 +532,7 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
|
|||
seq_printf(m, ",namelen=%d", nfss->namelen);
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
if (nfss->nfs_client->cl_nfsversion == 4) {
|
||||
if (nfss->nfs_client->rpc_ops->version == 4) {
|
||||
seq_printf(m, "\n\tnfsv4:\t");
|
||||
seq_printf(m, "bm0=0x%x", nfss->attr_bitmask[0]);
|
||||
seq_printf(m, ",bm1=0x%x", nfss->attr_bitmask[1]);
|
||||
|
@ -575,22 +600,80 @@ static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
|
|||
}
|
||||
|
||||
/*
|
||||
* Sanity-check a server address provided by the mount command
|
||||
* Set the port number in an address. Be agnostic about the address family.
|
||||
*/
|
||||
static void nfs_set_port(struct sockaddr *sap, unsigned short port)
|
||||
{
|
||||
switch (sap->sa_family) {
|
||||
case AF_INET: {
|
||||
struct sockaddr_in *ap = (struct sockaddr_in *)sap;
|
||||
ap->sin_port = htons(port);
|
||||
break;
|
||||
}
|
||||
case AF_INET6: {
|
||||
struct sockaddr_in6 *ap = (struct sockaddr_in6 *)sap;
|
||||
ap->sin6_port = htons(port);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Sanity-check a server address provided by the mount command.
|
||||
*
|
||||
* Address family must be initialized, and address must not be
|
||||
* the ANY address for that family.
|
||||
*/
|
||||
static int nfs_verify_server_address(struct sockaddr *addr)
|
||||
{
|
||||
switch (addr->sa_family) {
|
||||
case AF_INET: {
|
||||
struct sockaddr_in *sa = (struct sockaddr_in *) addr;
|
||||
if (sa->sin_addr.s_addr != INADDR_ANY)
|
||||
return 1;
|
||||
break;
|
||||
struct sockaddr_in *sa = (struct sockaddr_in *)addr;
|
||||
return sa->sin_addr.s_addr != INADDR_ANY;
|
||||
}
|
||||
case AF_INET6: {
|
||||
struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
|
||||
return !ipv6_addr_any(sa);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse string addresses passed in via a mount option,
|
||||
* and construct a sockaddr based on the result.
|
||||
*
|
||||
* If address parsing fails, set the sockaddr's address
|
||||
* family to AF_UNSPEC to force nfs_verify_server_address()
|
||||
* to punt the mount.
|
||||
*/
|
||||
static void nfs_parse_server_address(char *value,
|
||||
struct sockaddr *sap,
|
||||
size_t *len)
|
||||
{
|
||||
if (strchr(value, ':')) {
|
||||
struct sockaddr_in6 *ap = (struct sockaddr_in6 *)sap;
|
||||
u8 *addr = (u8 *)&ap->sin6_addr.in6_u;
|
||||
|
||||
ap->sin6_family = AF_INET6;
|
||||
*len = sizeof(*ap);
|
||||
if (in6_pton(value, -1, addr, '\0', NULL))
|
||||
return;
|
||||
} else {
|
||||
struct sockaddr_in *ap = (struct sockaddr_in *)sap;
|
||||
u8 *addr = (u8 *)&ap->sin_addr.s_addr;
|
||||
|
||||
ap->sin_family = AF_INET;
|
||||
*len = sizeof(*ap);
|
||||
if (in4_pton(value, -1, addr, '\0', NULL))
|
||||
return;
|
||||
}
|
||||
|
||||
sap->sa_family = AF_UNSPEC;
|
||||
*len = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Error-check and convert a string of mount options from user space into
|
||||
* a data structure
|
||||
|
@ -599,6 +682,7 @@ static int nfs_parse_mount_options(char *raw,
|
|||
struct nfs_parsed_mount_data *mnt)
|
||||
{
|
||||
char *p, *string;
|
||||
unsigned short port = 0;
|
||||
|
||||
if (!raw) {
|
||||
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
|
||||
|
@ -701,7 +785,7 @@ static int nfs_parse_mount_options(char *raw,
|
|||
return 0;
|
||||
if (option < 0 || option > 65535)
|
||||
return 0;
|
||||
mnt->nfs_server.address.sin_port = htons(option);
|
||||
port = option;
|
||||
break;
|
||||
case Opt_rsize:
|
||||
if (match_int(args, &mnt->rsize))
|
||||
|
@ -763,13 +847,6 @@ static int nfs_parse_mount_options(char *raw,
|
|||
return 0;
|
||||
mnt->mount_server.port = option;
|
||||
break;
|
||||
case Opt_mountprog:
|
||||
if (match_int(args, &option))
|
||||
return 0;
|
||||
if (option < 0)
|
||||
return 0;
|
||||
mnt->mount_server.program = option;
|
||||
break;
|
||||
case Opt_mountvers:
|
||||
if (match_int(args, &option))
|
||||
return 0;
|
||||
|
@ -777,13 +854,6 @@ static int nfs_parse_mount_options(char *raw,
|
|||
return 0;
|
||||
mnt->mount_server.version = option;
|
||||
break;
|
||||
case Opt_nfsprog:
|
||||
if (match_int(args, &option))
|
||||
return 0;
|
||||
if (option < 0)
|
||||
return 0;
|
||||
mnt->nfs_server.program = option;
|
||||
break;
|
||||
case Opt_nfsvers:
|
||||
if (match_int(args, &option))
|
||||
return 0;
|
||||
|
@ -927,24 +997,32 @@ static int nfs_parse_mount_options(char *raw,
|
|||
string = match_strdup(args);
|
||||
if (string == NULL)
|
||||
goto out_nomem;
|
||||
mnt->nfs_server.address.sin_family = AF_INET;
|
||||
mnt->nfs_server.address.sin_addr.s_addr =
|
||||
in_aton(string);
|
||||
nfs_parse_server_address(string, (struct sockaddr *)
|
||||
&mnt->nfs_server.address,
|
||||
&mnt->nfs_server.addrlen);
|
||||
kfree(string);
|
||||
break;
|
||||
case Opt_clientaddr:
|
||||
string = match_strdup(args);
|
||||
if (string == NULL)
|
||||
goto out_nomem;
|
||||
kfree(mnt->client_address);
|
||||
mnt->client_address = string;
|
||||
break;
|
||||
case Opt_mounthost:
|
||||
string = match_strdup(args);
|
||||
if (string == NULL)
|
||||
goto out_nomem;
|
||||
kfree(mnt->mount_server.hostname);
|
||||
mnt->mount_server.hostname = string;
|
||||
break;
|
||||
case Opt_mountaddr:
|
||||
string = match_strdup(args);
|
||||
if (string == NULL)
|
||||
goto out_nomem;
|
||||
mnt->mount_server.address.sin_family = AF_INET;
|
||||
mnt->mount_server.address.sin_addr.s_addr =
|
||||
in_aton(string);
|
||||
nfs_parse_server_address(string, (struct sockaddr *)
|
||||
&mnt->mount_server.address,
|
||||
&mnt->mount_server.addrlen);
|
||||
kfree(string);
|
||||
break;
|
||||
|
||||
|
@ -957,6 +1035,8 @@ static int nfs_parse_mount_options(char *raw,
|
|||
}
|
||||
}
|
||||
|
||||
nfs_set_port((struct sockaddr *)&mnt->nfs_server.address, port);
|
||||
|
||||
return 1;
|
||||
|
||||
out_nomem:
|
||||
|
@ -987,7 +1067,8 @@ static int nfs_parse_mount_options(char *raw,
|
|||
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
||||
struct nfs_fh *root_fh)
|
||||
{
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr *sap = (struct sockaddr *)&args->mount_server.address;
|
||||
char *hostname;
|
||||
int status;
|
||||
|
||||
if (args->mount_server.version == 0) {
|
||||
|
@ -997,25 +1078,32 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
|||
args->mount_server.version = NFS_MNT_VERSION;
|
||||
}
|
||||
|
||||
if (args->mount_server.hostname)
|
||||
hostname = args->mount_server.hostname;
|
||||
else
|
||||
hostname = args->nfs_server.hostname;
|
||||
|
||||
/*
|
||||
* Construct the mount server's address.
|
||||
*/
|
||||
if (args->mount_server.address.sin_addr.s_addr != INADDR_ANY)
|
||||
sin = args->mount_server.address;
|
||||
else
|
||||
sin = args->nfs_server.address;
|
||||
if (args->mount_server.address.ss_family == AF_UNSPEC) {
|
||||
memcpy(sap, &args->nfs_server.address,
|
||||
args->nfs_server.addrlen);
|
||||
args->mount_server.addrlen = args->nfs_server.addrlen;
|
||||
}
|
||||
|
||||
/*
|
||||
* autobind will be used if mount_server.port == 0
|
||||
*/
|
||||
sin.sin_port = htons(args->mount_server.port);
|
||||
nfs_set_port(sap, args->mount_server.port);
|
||||
|
||||
/*
|
||||
* Now ask the mount server to map our export path
|
||||
* to a file handle.
|
||||
*/
|
||||
status = nfs_mount((struct sockaddr *) &sin,
|
||||
sizeof(sin),
|
||||
args->nfs_server.hostname,
|
||||
status = nfs_mount(sap,
|
||||
args->mount_server.addrlen,
|
||||
hostname,
|
||||
args->nfs_server.export_path,
|
||||
args->mount_server.version,
|
||||
args->mount_server.protocol,
|
||||
|
@ -1023,8 +1111,8 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
|||
if (status == 0)
|
||||
return 0;
|
||||
|
||||
dfprintk(MOUNT, "NFS: unable to mount server " NIPQUAD_FMT
|
||||
", error %d\n", NIPQUAD(sin.sin_addr.s_addr), status);
|
||||
dfprintk(MOUNT, "NFS: unable to mount server %s, error %d",
|
||||
hostname, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -1043,9 +1131,6 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
|||
*
|
||||
* + breaking back: trying proto=udp after proto=tcp, v2 after v3,
|
||||
* mountproto=tcp after mountproto=udp, and so on
|
||||
*
|
||||
* XXX: as far as I can tell, changing the NFS program number is not
|
||||
* supported in the NFS client.
|
||||
*/
|
||||
static int nfs_validate_mount_data(void *options,
|
||||
struct nfs_parsed_mount_data *args,
|
||||
|
@ -1069,9 +1154,7 @@ static int nfs_validate_mount_data(void *options,
|
|||
args->acdirmin = 30;
|
||||
args->acdirmax = 60;
|
||||
args->mount_server.protocol = XPRT_TRANSPORT_UDP;
|
||||
args->mount_server.program = NFS_MNT_PROGRAM;
|
||||
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
|
||||
args->nfs_server.program = NFS_PROGRAM;
|
||||
|
||||
switch (data->version) {
|
||||
case 1:
|
||||
|
@ -1102,9 +1185,6 @@ static int nfs_validate_mount_data(void *options,
|
|||
memset(mntfh->data + mntfh->size, 0,
|
||||
sizeof(mntfh->data) - mntfh->size);
|
||||
|
||||
if (!nfs_verify_server_address((struct sockaddr *) &data->addr))
|
||||
goto out_no_address;
|
||||
|
||||
/*
|
||||
* Translate to nfs_parsed_mount_data, which nfs_fill_super
|
||||
* can deal with.
|
||||
|
@ -1119,7 +1199,14 @@ static int nfs_validate_mount_data(void *options,
|
|||
args->acregmax = data->acregmax;
|
||||
args->acdirmin = data->acdirmin;
|
||||
args->acdirmax = data->acdirmax;
|
||||
args->nfs_server.address = data->addr;
|
||||
|
||||
memcpy(&args->nfs_server.address, &data->addr,
|
||||
sizeof(data->addr));
|
||||
args->nfs_server.addrlen = sizeof(data->addr);
|
||||
if (!nfs_verify_server_address((struct sockaddr *)
|
||||
&args->nfs_server.address))
|
||||
goto out_no_address;
|
||||
|
||||
if (!(data->flags & NFS_MOUNT_TCP))
|
||||
args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
|
||||
/* N.B. caller will free nfs_server.hostname in all cases */
|
||||
|
@ -1322,15 +1409,50 @@ static int nfs_set_super(struct super_block *s, void *data)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int nfs_compare_super_address(struct nfs_server *server1,
|
||||
struct nfs_server *server2)
|
||||
{
|
||||
struct sockaddr *sap1, *sap2;
|
||||
|
||||
sap1 = (struct sockaddr *)&server1->nfs_client->cl_addr;
|
||||
sap2 = (struct sockaddr *)&server2->nfs_client->cl_addr;
|
||||
|
||||
if (sap1->sa_family != sap2->sa_family)
|
||||
return 0;
|
||||
|
||||
switch (sap1->sa_family) {
|
||||
case AF_INET: {
|
||||
struct sockaddr_in *sin1 = (struct sockaddr_in *)sap1;
|
||||
struct sockaddr_in *sin2 = (struct sockaddr_in *)sap2;
|
||||
if (sin1->sin_addr.s_addr != sin2->sin_addr.s_addr)
|
||||
return 0;
|
||||
if (sin1->sin_port != sin2->sin_port)
|
||||
return 0;
|
||||
break;
|
||||
}
|
||||
case AF_INET6: {
|
||||
struct sockaddr_in6 *sin1 = (struct sockaddr_in6 *)sap1;
|
||||
struct sockaddr_in6 *sin2 = (struct sockaddr_in6 *)sap2;
|
||||
if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
|
||||
return 0;
|
||||
if (sin1->sin6_port != sin2->sin6_port)
|
||||
return 0;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int nfs_compare_super(struct super_block *sb, void *data)
|
||||
{
|
||||
struct nfs_sb_mountdata *sb_mntdata = data;
|
||||
struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
|
||||
int mntflags = sb_mntdata->mntflags;
|
||||
|
||||
if (memcmp(&old->nfs_client->cl_addr,
|
||||
&server->nfs_client->cl_addr,
|
||||
sizeof(old->nfs_client->cl_addr)) != 0)
|
||||
if (!nfs_compare_super_address(old, server))
|
||||
return 0;
|
||||
/* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
|
||||
if (old->flags & NFS_MOUNT_UNSHARED)
|
||||
|
@ -1400,6 +1522,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
|
|||
|
||||
out:
|
||||
kfree(data.nfs_server.hostname);
|
||||
kfree(data.mount_server.hostname);
|
||||
return error;
|
||||
|
||||
out_err_nosb:
|
||||
|
@ -1527,6 +1650,28 @@ static void nfs4_fill_super(struct super_block *sb)
|
|||
nfs_initialise_sb(sb);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the user didn't specify a port, set the port number to
|
||||
* the NFS version 4 default port.
|
||||
*/
|
||||
static void nfs4_default_port(struct sockaddr *sap)
|
||||
{
|
||||
switch (sap->sa_family) {
|
||||
case AF_INET: {
|
||||
struct sockaddr_in *ap = (struct sockaddr_in *)sap;
|
||||
if (ap->sin_port == 0)
|
||||
ap->sin_port = htons(NFS_PORT);
|
||||
break;
|
||||
}
|
||||
case AF_INET6: {
|
||||
struct sockaddr_in6 *ap = (struct sockaddr_in6 *)sap;
|
||||
if (ap->sin6_port == 0)
|
||||
ap->sin6_port = htons(NFS_PORT);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Validate NFSv4 mount options
|
||||
*/
|
||||
|
@ -1534,6 +1679,7 @@ static int nfs4_validate_mount_data(void *options,
|
|||
struct nfs_parsed_mount_data *args,
|
||||
const char *dev_name)
|
||||
{
|
||||
struct sockaddr_in *ap;
|
||||
struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
|
||||
char *c;
|
||||
|
||||
|
@ -1554,18 +1700,21 @@ static int nfs4_validate_mount_data(void *options,
|
|||
|
||||
switch (data->version) {
|
||||
case 1:
|
||||
if (data->host_addrlen != sizeof(args->nfs_server.address))
|
||||
ap = (struct sockaddr_in *)&args->nfs_server.address;
|
||||
if (data->host_addrlen > sizeof(args->nfs_server.address))
|
||||
goto out_no_address;
|
||||
if (copy_from_user(&args->nfs_server.address,
|
||||
data->host_addr,
|
||||
sizeof(args->nfs_server.address)))
|
||||
if (data->host_addrlen == 0)
|
||||
goto out_no_address;
|
||||
args->nfs_server.addrlen = data->host_addrlen;
|
||||
if (copy_from_user(ap, data->host_addr, data->host_addrlen))
|
||||
return -EFAULT;
|
||||
if (args->nfs_server.address.sin_port == 0)
|
||||
args->nfs_server.address.sin_port = htons(NFS_PORT);
|
||||
if (!nfs_verify_server_address((struct sockaddr *)
|
||||
&args->nfs_server.address))
|
||||
goto out_no_address;
|
||||
|
||||
nfs4_default_port((struct sockaddr *)
|
||||
&args->nfs_server.address);
|
||||
|
||||
switch (data->auth_flavourlen) {
|
||||
case 0:
|
||||
args->auth_flavors[0] = RPC_AUTH_UNIX;
|
||||
|
@ -1623,6 +1772,9 @@ static int nfs4_validate_mount_data(void *options,
|
|||
&args->nfs_server.address))
|
||||
return -EINVAL;
|
||||
|
||||
nfs4_default_port((struct sockaddr *)
|
||||
&args->nfs_server.address);
|
||||
|
||||
switch (args->auth_flavor_len) {
|
||||
case 0:
|
||||
args->auth_flavors[0] = RPC_AUTH_UNIX;
|
||||
|
@ -1643,21 +1795,16 @@ static int nfs4_validate_mount_data(void *options,
|
|||
len = c - dev_name;
|
||||
if (len > NFS4_MAXNAMLEN)
|
||||
return -ENAMETOOLONG;
|
||||
args->nfs_server.hostname = kzalloc(len, GFP_KERNEL);
|
||||
if (args->nfs_server.hostname == NULL)
|
||||
return -ENOMEM;
|
||||
strncpy(args->nfs_server.hostname, dev_name, len - 1);
|
||||
/* N.B. caller will free nfs_server.hostname in all cases */
|
||||
args->nfs_server.hostname = kstrndup(dev_name, len, GFP_KERNEL);
|
||||
|
||||
c++; /* step over the ':' */
|
||||
len = strlen(c);
|
||||
if (len > NFS4_MAXPATHLEN)
|
||||
return -ENAMETOOLONG;
|
||||
args->nfs_server.export_path = kzalloc(len + 1, GFP_KERNEL);
|
||||
if (args->nfs_server.export_path == NULL)
|
||||
return -ENOMEM;
|
||||
strncpy(args->nfs_server.export_path, c, len);
|
||||
args->nfs_server.export_path = kstrndup(c, len, GFP_KERNEL);
|
||||
|
||||
dprintk("MNTPATH: %s\n", args->nfs_server.export_path);
|
||||
dprintk("NFS: MNTPATH: '%s'\n", args->nfs_server.export_path);
|
||||
|
||||
if (args->client_address == NULL)
|
||||
goto out_no_client_address;
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
struct nfs_unlinkdata {
|
||||
struct hlist_node list;
|
||||
struct nfs_removeargs args;
|
||||
|
@ -68,24 +70,6 @@ static void nfs_dec_sillycount(struct inode *dir)
|
|||
wake_up(&nfsi->waitqueue);
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_async_unlink_init - Initialize the RPC info
|
||||
* task: rpc_task of the sillydelete
|
||||
*/
|
||||
static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct nfs_unlinkdata *data = calldata;
|
||||
struct inode *dir = data->dir;
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
|
||||
NFS_PROTO(dir)->unlink_setup(&msg, dir);
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_async_unlink_done - Sillydelete post-processing
|
||||
* @task: rpc_task of the sillydelete
|
||||
|
@ -113,32 +97,45 @@ static void nfs_async_unlink_release(void *calldata)
|
|||
struct nfs_unlinkdata *data = calldata;
|
||||
|
||||
nfs_dec_sillycount(data->dir);
|
||||
nfs_sb_deactive(NFS_SERVER(data->dir));
|
||||
nfs_free_unlinkdata(data);
|
||||
}
|
||||
|
||||
static const struct rpc_call_ops nfs_unlink_ops = {
|
||||
.rpc_call_prepare = nfs_async_unlink_init,
|
||||
.rpc_call_done = nfs_async_unlink_done,
|
||||
.rpc_release = nfs_async_unlink_release,
|
||||
};
|
||||
|
||||
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = data->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs_unlink_ops,
|
||||
.callback_data = data,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
struct rpc_task *task;
|
||||
struct dentry *alias;
|
||||
|
||||
alias = d_lookup(parent, &data->args.name);
|
||||
if (alias != NULL) {
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* Hey, we raced with lookup... See if we need to transfer
|
||||
* the sillyrename information to the aliased dentry.
|
||||
*/
|
||||
nfs_free_dname(data);
|
||||
spin_lock(&alias->d_lock);
|
||||
if (!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
|
||||
if (alias->d_inode != NULL &&
|
||||
!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
|
||||
alias->d_fsdata = data;
|
||||
alias->d_flags ^= DCACHE_NFSFS_RENAMED;
|
||||
alias->d_flags |= DCACHE_NFSFS_RENAMED;
|
||||
ret = 1;
|
||||
}
|
||||
spin_unlock(&alias->d_lock);
|
||||
|
@ -151,10 +148,14 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
|
|||
nfs_dec_sillycount(dir);
|
||||
return 0;
|
||||
}
|
||||
nfs_sb_active(NFS_SERVER(dir));
|
||||
data->args.fh = NFS_FH(dir);
|
||||
nfs_fattr_init(&data->res.dir_attr);
|
||||
|
||||
task = rpc_run_task(NFS_CLIENT(dir), RPC_TASK_ASYNC, &nfs_unlink_ops, data);
|
||||
NFS_PROTO(dir)->unlink_setup(&msg, dir);
|
||||
|
||||
task_setup_data.rpc_client = NFS_CLIENT(dir);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
return 1;
|
||||
|
|
fs/nfs/write.c
|
@ -196,7 +196,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
|
|||
}
|
||||
/* Update file length */
|
||||
nfs_grow_file(page, offset, count);
|
||||
nfs_unlock_request(req);
|
||||
nfs_clear_page_tag_locked(req);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -252,7 +252,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|||
struct page *page)
|
||||
{
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs_page *req;
|
||||
int ret;
|
||||
|
||||
|
@ -263,10 +262,10 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|||
spin_unlock(&inode->i_lock);
|
||||
return 0;
|
||||
}
|
||||
if (nfs_lock_request_dontget(req))
|
||||
if (nfs_set_page_tag_locked(req))
|
||||
break;
|
||||
/* Note: If we hold the page lock, as is the case in nfs_writepage,
|
||||
* then the call to nfs_lock_request_dontget() will always
|
||||
* then the call to nfs_set_page_tag_locked() will always
|
||||
* succeed provided that someone hasn't already marked the
|
||||
* request as dirty (in which case we don't care).
|
||||
*/
|
||||
|
@ -280,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|||
if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
|
||||
/* This request is marked for commit */
|
||||
spin_unlock(&inode->i_lock);
|
||||
nfs_unlock_request(req);
|
||||
nfs_clear_page_tag_locked(req);
|
||||
nfs_pageio_complete(pgio);
|
||||
return 0;
|
||||
}
|
||||
|
@ -288,8 +287,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|||
spin_unlock(&inode->i_lock);
|
||||
BUG();
|
||||
}
|
||||
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
|
||||
NFS_PAGE_TAG_LOCKED);
|
||||
spin_unlock(&inode->i_lock);
|
||||
nfs_pageio_add_request(pgio, req);
|
||||
return 0;
|
||||
|
@ -381,6 +378,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
|
|||
set_page_private(req->wb_page, (unsigned long)req);
|
||||
nfsi->npages++;
|
||||
kref_get(&req->wb_kref);
|
||||
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -596,7 +594,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
|
|||
spin_lock(&inode->i_lock);
|
||||
req = nfs_page_find_request_locked(page);
|
||||
if (req) {
|
||||
if (!nfs_lock_request_dontget(req)) {
|
||||
if (!nfs_set_page_tag_locked(req)) {
|
||||
int error;
|
||||
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
@ -646,7 +644,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
|
|||
|| req->wb_page != page
|
||||
|| !nfs_dirty_request(req)
|
||||
|| offset > rqend || end < req->wb_offset) {
|
||||
nfs_unlock_request(req);
|
||||
nfs_clear_page_tag_locked(req);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
|
@ -755,7 +753,7 @@ static void nfs_writepage_release(struct nfs_page *req)
|
|||
nfs_clear_page_tag_locked(req);
|
||||
}
|
||||
|
||||
static inline int flush_task_priority(int how)
|
||||
static int flush_task_priority(int how)
|
||||
{
|
||||
switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
|
||||
case FLUSH_HIGHPRI:
|
||||
|
@ -775,15 +773,31 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
|
|||
unsigned int count, unsigned int offset,
|
||||
int how)
|
||||
{
|
||||
struct inode *inode;
|
||||
int flags;
|
||||
struct inode *inode = req->wb_context->path.dentry->d_inode;
|
||||
int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
|
||||
int priority = flush_task_priority(how);
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = req->wb_context->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = NFS_CLIENT(inode),
|
||||
.task = &data->task,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = call_ops,
|
||||
.callback_data = data,
|
||||
.flags = flags,
|
||||
.priority = priority,
|
||||
};
|
||||
|
||||
/* Set up the RPC argument and reply structs
|
||||
* NB: take care not to mess about with data->commit et al. */
|
||||
|
||||
data->req = req;
|
||||
data->inode = inode = req->wb_context->path.dentry->d_inode;
|
||||
data->cred = req->wb_context->cred;
|
||||
data->cred = msg.rpc_cred;
|
||||
|
||||
data->args.fh = NFS_FH(inode);
|
||||
data->args.offset = req_offset(req) + offset;
|
||||
|
@ -791,6 +805,12 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
|
|||
data->args.pages = data->pagevec;
|
||||
data->args.count = count;
|
||||
data->args.context = req->wb_context;
|
||||
data->args.stable = NFS_UNSTABLE;
|
||||
if (how & FLUSH_STABLE) {
|
||||
data->args.stable = NFS_DATA_SYNC;
|
||||
if (!NFS_I(inode)->ncommit)
|
||||
data->args.stable = NFS_FILE_SYNC;
|
||||
}
|
||||
|
||||
data->res.fattr = &data->fattr;
|
||||
data->res.count = count;
|
||||
|
@ -798,12 +818,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
|
|||
nfs_fattr_init(&data->fattr);
|
||||
|
||||
/* Set up the initial task struct. */
|
||||
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
|
||||
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
|
||||
NFS_PROTO(inode)->write_setup(data, how);
|
||||
|
||||
data->task.tk_priority = flush_task_priority(how);
|
||||
data->task.tk_cookie = (unsigned long)inode;
|
||||
NFS_PROTO(inode)->write_setup(data, &msg);
|
||||
|
||||
dprintk("NFS: %5u initiated write call "
|
||||
"(req %s/%Ld, %u bytes @ offset %Lu)\n",
|
||||
|
@ -812,16 +827,10 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
|
|||
(long long)NFS_FILEID(inode),
|
||||
count,
|
||||
(unsigned long long)data->args.offset);
|
||||
}
|
||||
|
||||
static void nfs_execute_write(struct nfs_write_data *data)
|
||||
{
|
||||
struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
|
||||
sigset_t oldset;
|
||||
|
||||
rpc_clnt_sigmask(clnt, &oldset);
|
||||
rpc_execute(&data->task);
|
||||
rpc_clnt_sigunmask(clnt, &oldset);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -868,7 +877,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
|
|||
wsize, offset, how);
|
||||
offset += wsize;
|
||||
nbytes -= wsize;
|
||||
nfs_execute_write(data);
|
||||
} while (nbytes != 0);
|
||||
|
||||
return 0;
|
||||
|
@ -916,7 +924,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
|
|||
/* Set up the argument struct */
|
||||
nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
|
||||
|
||||
nfs_execute_write(data);
|
||||
return 0;
|
||||
out_bad:
|
||||
while (!list_empty(head)) {
|
||||
|
@ -932,7 +939,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
|
|||
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
|
||||
struct inode *inode, int ioflags)
|
||||
{
|
||||
int wsize = NFS_SERVER(inode)->wsize;
|
||||
size_t wsize = NFS_SERVER(inode)->wsize;
|
||||
|
||||
if (wsize < PAGE_CACHE_SIZE)
|
||||
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
|
||||
|
@ -1146,19 +1153,33 @@ static void nfs_commit_rpcsetup(struct list_head *head,
|
|||
struct nfs_write_data *data,
|
||||
int how)
|
||||
{
|
||||
struct nfs_page *first;
|
||||
struct inode *inode;
|
||||
int flags;
|
||||
struct nfs_page *first = nfs_list_entry(head->next);
|
||||
struct inode *inode = first->wb_context->path.dentry->d_inode;
|
||||
int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
|
||||
int priority = flush_task_priority(how);
|
||||
struct rpc_task *task;
|
||||
struct rpc_message msg = {
|
||||
.rpc_argp = &data->args,
|
||||
.rpc_resp = &data->res,
|
||||
.rpc_cred = first->wb_context->cred,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.task = &data->task,
|
||||
.rpc_client = NFS_CLIENT(inode),
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &nfs_commit_ops,
|
||||
.callback_data = data,
|
||||
.flags = flags,
|
||||
.priority = priority,
|
||||
};
|
||||
|
||||
/* Set up the RPC argument and reply structs
|
||||
* NB: take care not to mess about with data->commit et al. */
|
||||
|
||||
list_splice_init(head, &data->pages);
|
||||
first = nfs_list_entry(data->pages.next);
|
||||
inode = first->wb_context->path.dentry->d_inode;
|
||||
|
||||
data->inode = inode;
|
||||
data->cred = first->wb_context->cred;
|
||||
data->cred = msg.rpc_cred;
|
||||
|
||||
data->args.fh = NFS_FH(data->inode);
|
||||
/* Note: we always request a commit of the entire inode */
|
||||
|
@ -1170,14 +1191,13 @@ static void nfs_commit_rpcsetup(struct list_head *head,
|
|||
nfs_fattr_init(&data->fattr);
|
||||
|
||||
/* Set up the initial task struct. */
|
||||
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
|
||||
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
|
||||
NFS_PROTO(inode)->commit_setup(data, how);
|
||||
NFS_PROTO(inode)->commit_setup(data, &msg);
|
||||
|
||||
data->task.tk_priority = flush_task_priority(how);
|
||||
data->task.tk_cookie = (unsigned long)inode;
|
||||
|
||||
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
|
||||
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (!IS_ERR(task))
|
||||
rpc_put_task(task);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1197,7 +1217,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
|
|||
/* Set up the argument struct */
|
||||
nfs_commit_rpcsetup(head, data, how);
|
||||
|
||||
nfs_execute_write(data);
|
||||
return 0;
|
||||
out_bad:
|
||||
while (!list_empty(head)) {
|
||||
|
|
|
@ -32,10 +32,27 @@ struct nlmsvc_binding {

extern struct nlmsvc_binding * nlmsvc_ops;

/*
* Similar to nfs_client_initdata, but without the NFS-specific
* rpc_ops field.
*/
struct nlmclnt_initdata {
const char *hostname;
const struct sockaddr *address;
size_t addrlen;
unsigned short protocol;
u32 nfs_version;
};

/*
* Functions exported by the lockd module
*/
extern int nlmclnt_proc(struct inode *, int, struct file_lock *);

extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);

extern int nlmclnt_proc(struct nlm_host *host, int cmd,
struct file_lock *fl);
extern int lockd_up(int proto);
extern void lockd_down(void);

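Aside (not lines from this merge): a minimal sketch of how a mount-time caller could wire up the new lockd client API declared above. The helper name, the transport and version values, and the nfs_server->nlm_host hand-off are illustrative assumptions.

/* Illustrative sketch only -- not code from this diff.
 * Assumes the caller already holds the server's address and hostname. */
static int example_start_lockd(struct nfs_server *server,
                               const struct sockaddr *addr, size_t addrlen,
                               const char *hostname)
{
        struct nlmclnt_initdata nlm_init = {
                .hostname    = hostname,
                .address     = addr,
                .addrlen     = addrlen,
                .protocol    = IPPROTO_TCP,   /* assumption: TCP mount */
                .nfs_version = 3,             /* NFSv2 or NFSv3 mount */
        };
        struct nlm_host *host = nlmclnt_init(&nlm_init);

        if (IS_ERR(host))
                return PTR_ERR(host);
        server->nlm_host = host;      /* later lock calls: nlmclnt_proc(host, cmd, fl) */
        return 0;
}
/* ...and the matching teardown on umount: nlmclnt_done(server->nlm_host); */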
|
|
@ -196,28 +196,67 @@ struct nfs_inode {
|
|||
#define NFS_INO_STALE (2) /* possible stale inode */
|
||||
#define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */
|
||||
|
||||
static inline struct nfs_inode *NFS_I(struct inode *inode)
|
||||
static inline struct nfs_inode *NFS_I(const struct inode *inode)
|
||||
{
|
||||
return container_of(inode, struct nfs_inode, vfs_inode);
|
||||
}
|
||||
#define NFS_SB(s) ((struct nfs_server *)(s->s_fs_info))
|
||||
|
||||
#define NFS_FH(inode) (&NFS_I(inode)->fh)
|
||||
#define NFS_SERVER(inode) (NFS_SB(inode->i_sb))
|
||||
#define NFS_CLIENT(inode) (NFS_SERVER(inode)->client)
|
||||
#define NFS_PROTO(inode) (NFS_SERVER(inode)->nfs_client->rpc_ops)
|
||||
#define NFS_COOKIEVERF(inode) (NFS_I(inode)->cookieverf)
|
||||
#define NFS_MINATTRTIMEO(inode) \
|
||||
(S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmin \
|
||||
: NFS_SERVER(inode)->acregmin)
|
||||
#define NFS_MAXATTRTIMEO(inode) \
|
||||
(S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmax \
|
||||
: NFS_SERVER(inode)->acregmax)
|
||||
static inline struct nfs_server *NFS_SB(const struct super_block *s)
|
||||
{
|
||||
return (struct nfs_server *)(s->s_fs_info);
|
||||
}
|
||||
|
||||
#define NFS_FLAGS(inode) (NFS_I(inode)->flags)
|
||||
#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode)))
|
||||
static inline struct nfs_fh *NFS_FH(const struct inode *inode)
|
||||
{
|
||||
return &NFS_I(inode)->fh;
|
||||
}
|
||||
|
||||
#define NFS_FILEID(inode) (NFS_I(inode)->fileid)
|
||||
static inline struct nfs_server *NFS_SERVER(const struct inode *inode)
|
||||
{
|
||||
return NFS_SB(inode->i_sb);
|
||||
}
|
||||
|
||||
static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode)
|
||||
{
|
||||
return NFS_SERVER(inode)->client;
|
||||
}
|
||||
|
||||
static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
|
||||
{
|
||||
return NFS_SERVER(inode)->nfs_client->rpc_ops;
|
||||
}
|
||||
|
||||
static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
|
||||
{
|
||||
return NFS_I(inode)->cookieverf;
|
||||
}
|
||||
|
||||
static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
|
||||
{
|
||||
struct nfs_server *nfss = NFS_SERVER(inode);
|
||||
return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin;
|
||||
}
|
||||
|
||||
static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode)
|
||||
{
|
||||
struct nfs_server *nfss = NFS_SERVER(inode);
|
||||
return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax;
|
||||
}
|
||||
|
||||
static inline int NFS_STALE(const struct inode *inode)
|
||||
{
|
||||
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
|
||||
}
|
||||
|
||||
static inline __u64 NFS_FILEID(const struct inode *inode)
|
||||
{
|
||||
return NFS_I(inode)->fileid;
|
||||
}
|
||||
|
||||
static inline void set_nfs_fileid(struct inode *inode, __u64 fileid)
|
||||
{
|
||||
NFS_I(inode)->fileid = fileid;
|
||||
}
|
||||
|
||||
static inline void nfs_mark_for_revalidate(struct inode *inode)
|
||||
{
|
||||
|
@ -237,7 +276,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
|
|||
|
||||
static inline int NFS_USE_READDIRPLUS(struct inode *inode)
|
||||
{
|
||||
return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
|
||||
return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
|
||||
}
|
||||
|
||||
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
|
||||
|
@ -366,6 +405,7 @@ extern const struct inode_operations nfs3_dir_inode_operations;
|
|||
extern const struct file_operations nfs_dir_operations;
|
||||
extern struct dentry_operations nfs_dentry_operations;
|
||||
|
||||
extern void nfs_force_lookup_revalidate(struct inode *dir);
|
||||
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
|
||||
extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
|
||||
extern void nfs_access_zap_cache(struct inode *inode);
|
||||
|
|
|
@ -3,8 +3,12 @@
|
|||
|
||||
#include <linux/list.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
|
||||
struct nfs_iostats;
|
||||
struct nlm_host;
|
||||
|
||||
/*
|
||||
* The nfs_client identifies our client state to the server.
|
||||
|
@ -14,20 +18,19 @@ struct nfs_client {
|
|||
int cl_cons_state; /* current construction state (-ve: init error) */
|
||||
#define NFS_CS_READY 0 /* ready to be used */
|
||||
#define NFS_CS_INITING 1 /* busy initialising */
|
||||
int cl_nfsversion; /* NFS protocol version */
|
||||
unsigned long cl_res_state; /* NFS resources state */
|
||||
#define NFS_CS_CALLBACK 1 /* - callback started */
|
||||
#define NFS_CS_IDMAP 2 /* - idmap started */
|
||||
#define NFS_CS_RENEWD 3 /* - renewd started */
|
||||
struct sockaddr_in cl_addr; /* server identifier */
|
||||
struct sockaddr_storage cl_addr; /* server identifier */
|
||||
size_t cl_addrlen;
|
||||
char * cl_hostname; /* hostname of server */
|
||||
struct list_head cl_share_link; /* link in global client list */
|
||||
struct list_head cl_superblocks; /* List of nfs_server structs */
|
||||
|
||||
struct rpc_clnt * cl_rpcclient;
|
||||
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
|
||||
unsigned long retrans_timeo; /* retransmit timeout */
|
||||
unsigned int retrans_count; /* number of retransmit tries */
|
||||
int cl_proto; /* Network transport protocol */
|
||||
|
||||
#ifdef CONFIG_NFS_V4
|
||||
u64 cl_clientid; /* constant */
|
||||
|
@ -62,7 +65,7 @@ struct nfs_client {
|
|||
/* Our own IP address, as a null-terminated string.
|
||||
* This is used to generate the clientid, and the callback address.
|
||||
*/
|
||||
char cl_ipaddr[16];
|
||||
char cl_ipaddr[48];
|
||||
unsigned char cl_id_uniquifier;
|
||||
#endif
|
||||
};
|
||||
|
@ -78,6 +81,7 @@ struct nfs_server {
|
|||
struct list_head master_link; /* link in master servers list */
|
||||
struct rpc_clnt * client; /* RPC client handle */
|
||||
struct rpc_clnt * client_acl; /* ACL RPC client handle */
|
||||
struct nlm_host *nlm_host; /* NLM client handle */
|
||||
struct nfs_iostats * io_stats; /* I/O statistics */
|
||||
struct backing_dev_info backing_dev_info;
|
||||
atomic_long_t writeback; /* number of writeback pages */
|
||||
|
@ -110,6 +114,9 @@ struct nfs_server {
|
|||
filesystem */
|
||||
#endif
|
||||
void (*destroy)(struct nfs_server *);
|
||||
|
||||
atomic_t active; /* Keep trace of any activity to this server */
|
||||
wait_queue_head_t active_wq; /* Wait for any activity to stop */
|
||||
};
|
||||
|
||||
/* Server capabilities */
|
||||
|
|
|
@ -83,6 +83,7 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
|
|||
extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
|
||||
extern int nfs_wait_on_request(struct nfs_page *);
|
||||
extern void nfs_unlock_request(struct nfs_page *req);
|
||||
extern int nfs_set_page_tag_locked(struct nfs_page *req);
|
||||
extern void nfs_clear_page_tag_locked(struct nfs_page *req);
|
||||
|
||||
|
||||
|
@ -95,18 +96,6 @@ nfs_lock_request_dontget(struct nfs_page *req)
|
|||
return !test_and_set_bit(PG_BUSY, &req->wb_flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock the page of an asynchronous request and take a reference
|
||||
*/
|
||||
static inline int
|
||||
nfs_lock_request(struct nfs_page *req)
|
||||
{
|
||||
if (test_and_set_bit(PG_BUSY, &req->wb_flags))
|
||||
return 0;
|
||||
kref_get(&req->wb_kref);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs_list_add_request - Insert a request into a list
|
||||
* @req: request
|
||||
|
|
|
@ -666,16 +666,17 @@ struct nfs4_rename_res {
|
|||
struct nfs_fattr * new_fattr;
|
||||
};
|
||||
|
||||
#define NFS4_SETCLIENTID_NAMELEN (56)
|
||||
struct nfs4_setclientid {
|
||||
const nfs4_verifier * sc_verifier; /* request */
|
||||
const nfs4_verifier * sc_verifier;
|
||||
unsigned int sc_name_len;
|
||||
char sc_name[48]; /* request */
|
||||
u32 sc_prog; /* request */
|
||||
char sc_name[NFS4_SETCLIENTID_NAMELEN];
|
||||
u32 sc_prog;
|
||||
unsigned int sc_netid_len;
|
||||
char sc_netid[4]; /* request */
|
||||
char sc_netid[RPCBIND_MAXNETIDLEN];
|
||||
unsigned int sc_uaddr_len;
|
||||
char sc_uaddr[24]; /* request */
|
||||
u32 sc_cb_ident; /* request */
|
||||
char sc_uaddr[RPCBIND_MAXUADDRLEN];
|
||||
u32 sc_cb_ident;
|
||||
};
|
||||
|
||||
struct nfs4_statfs_arg {
|
||||
|
@ -773,7 +774,7 @@ struct nfs_access_entry;
|
|||
* RPC procedure vector for NFSv2/NFSv3 demuxing
|
||||
*/
|
||||
struct nfs_rpc_ops {
|
||||
int version; /* Protocol version */
|
||||
u32 version; /* Protocol version */
|
||||
struct dentry_operations *dentry_ops;
|
||||
const struct inode_operations *dir_inode_ops;
|
||||
const struct inode_operations *file_inode_ops;
|
||||
|
@ -816,11 +817,11 @@ struct nfs_rpc_ops {
|
|||
struct nfs_pathconf *);
|
||||
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
|
||||
__be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus);
|
||||
void (*read_setup) (struct nfs_read_data *);
|
||||
void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
|
||||
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
|
||||
void (*write_setup) (struct nfs_write_data *, int how);
|
||||
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
|
||||
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
|
||||
void (*commit_setup) (struct nfs_write_data *, int how);
|
||||
void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
|
||||
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
|
||||
int (*file_open) (struct inode *, struct file *);
|
||||
int (*file_release) (struct inode *, struct file *);
|
||||
|
|
|
@ -46,6 +46,7 @@ struct rpc_clnt {
|
|||
cl_autobind : 1;/* use getport() */
|
||||
|
||||
struct rpc_rtt * cl_rtt; /* RTO estimator data */
|
||||
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
|
||||
|
||||
int cl_nodelen; /* nodename length */
|
||||
char cl_nodename[UNX_MAXNODENAME];
|
||||
|
@ -54,6 +55,7 @@ struct rpc_clnt {
|
|||
struct dentry * cl_dentry; /* inode */
|
||||
struct rpc_clnt * cl_parent; /* Points to parent of clones */
|
||||
struct rpc_rtt cl_rtt_default;
|
||||
struct rpc_timeout cl_timeout_default;
|
||||
struct rpc_program * cl_program;
|
||||
char cl_inline_name[32];
|
||||
};
|
||||
|
@ -99,7 +101,7 @@ struct rpc_create_args {
|
|||
struct sockaddr *address;
|
||||
size_t addrsize;
|
||||
struct sockaddr *saddress;
|
||||
struct rpc_timeout *timeout;
|
||||
const struct rpc_timeout *timeout;
|
||||
char *servername;
|
||||
struct rpc_program *program;
|
||||
u32 version;
|
||||
|
@ -123,11 +125,10 @@ void rpc_shutdown_client(struct rpc_clnt *);
|
|||
void rpc_release_client(struct rpc_clnt *);
|
||||
|
||||
int rpcb_register(u32, u32, int, unsigned short, int *);
|
||||
int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int);
|
||||
int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
|
||||
void rpcb_getport_async(struct rpc_task *);
|
||||
|
||||
void rpc_call_setup(struct rpc_task *, struct rpc_message *, int);
|
||||
|
||||
void rpc_call_start(struct rpc_task *);
|
||||
int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
|
||||
int flags, const struct rpc_call_ops *tk_ops,
|
||||
void *calldata);
|
||||
|
@ -142,7 +143,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
|
|||
size_t rpc_max_payload(struct rpc_clnt *);
|
||||
void rpc_force_rebind(struct rpc_clnt *);
|
||||
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
|
||||
char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
|
||||
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_SUNRPC_CLNT_H */
|
||||
|
|
|
@ -152,5 +152,44 @@ typedef __be32 rpc_fraghdr
*/
#define RPCBIND_MAXNETIDLEN (4u)

/*
* Universal addresses are introduced in RFC 1833 and further spelled
* out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length
* of a universal address for use in allocating buffers and character
* arrays.
*
* Quoting RFC 3530, section 2.2:
*
* For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the
* US-ASCII string:
*
* h1.h2.h3.h4.p1.p2
*
* The prefix, "h1.h2.h3.h4", is the standard textual form for
* representing an IPv4 address, which is always four octets long.
* Assuming big-endian ordering, h1, h2, h3, and h4, are respectively,
* the first through fourth octets each converted to ASCII-decimal.
* Assuming big-endian ordering, p1 and p2 are, respectively, the first
* and second octets each converted to ASCII-decimal. For example, if a
* host, in big-endian order, has an address of 0x0A010307 and there is
* a service listening on, in big endian order, port 0x020F (decimal
* 527), then the complete universal address is "10.1.3.7.2.15".
*
* ...
*
* For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the
* US-ASCII string:
*
* x1:x2:x3:x4:x5:x6:x7:x8.p1.p2
*
* The suffix "p1.p2" is the service port, and is computed the same way
* as with universal addresses for TCP and UDP over IPv4. The prefix,
* "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for
* representing an IPv6 address as defined in Section 2.2 of [RFC2373].
* Additionally, the two alternative forms specified in Section 2.2 of
* [RFC2373] are also acceptable.
*/
#define RPCBIND_MAXUADDRLEN (56u)

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */

@ -56,8 +56,6 @@ struct rpc_task {
|
|||
__u8 tk_garb_retry;
|
||||
__u8 tk_cred_retry;
|
||||
|
||||
unsigned long tk_cookie; /* Cookie for batching tasks */
|
||||
|
||||
/*
|
||||
* timeout_fn to be executed by timer bottom half
|
||||
* callback to be executed after waking up
|
||||
|
@ -78,7 +76,6 @@ struct rpc_task {
|
|||
struct timer_list tk_timer; /* kernel timer */
|
||||
unsigned long tk_timeout; /* timeout for rpc_sleep() */
|
||||
unsigned short tk_flags; /* misc flags */
|
||||
unsigned char tk_priority : 2;/* Task priority */
|
||||
unsigned long tk_runstate; /* Task run status */
|
||||
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
|
||||
* be any workqueue
|
||||
|
@ -94,6 +91,9 @@ struct rpc_task {
|
|||
unsigned long tk_start; /* RPC task init timestamp */
|
||||
long tk_rtt; /* round-trip time (jiffies) */
|
||||
|
||||
pid_t tk_owner; /* Process id for batching tasks */
|
||||
unsigned char tk_priority : 2;/* Task priority */
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
unsigned short tk_pid; /* debugging aid */
|
||||
#endif
|
||||
|
@ -117,6 +117,15 @@ struct rpc_call_ops {
|
|||
void (*rpc_release)(void *);
|
||||
};
|
||||
|
||||
struct rpc_task_setup {
|
||||
struct rpc_task *task;
|
||||
struct rpc_clnt *rpc_client;
|
||||
const struct rpc_message *rpc_message;
|
||||
const struct rpc_call_ops *callback_ops;
|
||||
void *callback_data;
|
||||
unsigned short flags;
|
||||
signed char priority;
|
||||
};
|
||||
|
||||
/*
|
||||
* RPC task flags
|
||||
|
@ -180,10 +189,10 @@ struct rpc_call_ops {
|
|||
* Note: if you change these, you must also change
|
||||
* the task initialization definitions below.
|
||||
*/
|
||||
#define RPC_PRIORITY_LOW 0
|
||||
#define RPC_PRIORITY_NORMAL 1
|
||||
#define RPC_PRIORITY_HIGH 2
|
||||
#define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1)
|
||||
#define RPC_PRIORITY_LOW (-1)
|
||||
#define RPC_PRIORITY_NORMAL (0)
|
||||
#define RPC_PRIORITY_HIGH (1)
|
||||
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
|
||||
|
||||
/*
|
||||
* RPC synchronization objects
|
||||
|
@ -191,7 +200,7 @@ struct rpc_call_ops {
|
|||
struct rpc_wait_queue {
|
||||
spinlock_t lock;
|
||||
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
|
||||
unsigned long cookie; /* cookie of last task serviced */
|
||||
pid_t owner; /* process id of last task serviced */
|
||||
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
|
||||
unsigned char priority; /* current priority */
|
||||
unsigned char count; /* # task groups remaining serviced so far */
|
||||
|
@ -208,41 +217,13 @@ struct rpc_wait_queue {
|
|||
* performance of NFS operations such as read/write.
|
||||
*/
|
||||
#define RPC_BATCH_COUNT 16
|
||||
|
||||
#ifndef RPC_DEBUG
|
||||
# define RPC_WAITQ_INIT(var,qname) { \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
|
||||
.tasks = { \
|
||||
[0] = LIST_HEAD_INIT(var.tasks[0]), \
|
||||
[1] = LIST_HEAD_INIT(var.tasks[1]), \
|
||||
[2] = LIST_HEAD_INIT(var.tasks[2]), \
|
||||
}, \
|
||||
}
|
||||
#else
|
||||
# define RPC_WAITQ_INIT(var,qname) { \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
|
||||
.tasks = { \
|
||||
[0] = LIST_HEAD_INIT(var.tasks[0]), \
|
||||
[1] = LIST_HEAD_INIT(var.tasks[1]), \
|
||||
[2] = LIST_HEAD_INIT(var.tasks[2]), \
|
||||
}, \
|
||||
.name = qname, \
|
||||
}
|
||||
#endif
|
||||
# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)
|
||||
|
||||
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
|
||||
|
||||
/*
|
||||
* Function prototypes
|
||||
*/
|
||||
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
|
||||
const struct rpc_call_ops *ops, void *data);
|
||||
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
|
||||
const struct rpc_call_ops *ops, void *data);
|
||||
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
|
||||
int flags, const struct rpc_call_ops *ops,
|
||||
void *data);
|
||||
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
|
||||
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
|
||||
void rpc_put_task(struct rpc_task *);
|
||||
void rpc_exit_task(struct rpc_task *);
|
||||
void rpc_release_calldata(const struct rpc_call_ops *, void *);
|
||||
|
|
|
@ -120,7 +120,7 @@ struct rpc_xprt {
|
|||
struct kref kref; /* Reference count */
|
||||
struct rpc_xprt_ops * ops; /* transport methods */
|
||||
|
||||
struct rpc_timeout timeout; /* timeout parms */
|
||||
const struct rpc_timeout *timeout; /* timeout parms */
|
||||
struct sockaddr_storage addr; /* server address */
|
||||
size_t addrlen; /* size of server address */
|
||||
int prot; /* IP protocol */
|
||||
|
@ -183,7 +183,7 @@ struct rpc_xprt {
|
|||
bklog_u; /* backlog queue utilization */
|
||||
} stat;
|
||||
|
||||
char * address_strings[RPC_DISPLAY_MAX];
|
||||
const char *address_strings[RPC_DISPLAY_MAX];
|
||||
};
|
||||
|
||||
struct xprt_create {
|
||||
|
@ -191,7 +191,6 @@ struct xprt_create {
|
|||
struct sockaddr * srcaddr; /* optional local address */
|
||||
struct sockaddr * dstaddr; /* remote peer address */
|
||||
size_t addrlen;
|
||||
struct rpc_timeout * timeout; /* optional timeout parameters */
|
||||
};
|
||||
|
||||
struct xprt_class {
|
||||
|
@ -202,11 +201,6 @@ struct xprt_class {
|
|||
char name[32];
|
||||
};
|
||||
|
||||
/*
|
||||
* Transport operations used by ULPs
|
||||
*/
|
||||
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
|
||||
|
||||
/*
|
||||
* Generic internal transport functions
|
||||
*/
|
||||
|
@ -245,7 +239,8 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result);
|
|||
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
|
||||
void xprt_complete_rqst(struct rpc_task *task, int copied);
|
||||
void xprt_release_rqst_cong(struct rpc_task *task);
|
||||
void xprt_disconnect(struct rpc_xprt *xprt);
|
||||
void xprt_disconnect_done(struct rpc_xprt *xprt);
|
||||
void xprt_force_disconnect(struct rpc_xprt *xprt);
|
||||
|
||||
/*
|
||||
* Reserved bit positions in xprt->state
|
||||
|
@ -256,6 +251,7 @@ void xprt_disconnect(struct rpc_xprt *xprt);
|
|||
#define XPRT_CLOSE_WAIT (3)
|
||||
#define XPRT_BOUND (4)
|
||||
#define XPRT_BINDING (5)
|
||||
#define XPRT_CLOSING (6)
|
||||
|
||||
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
||||
{
|
||||
|
|
|
@ -51,6 +51,7 @@ rpcauth_register(const struct rpc_authops *ops)
|
|||
spin_unlock(&rpc_authflavor_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_register);
|
||||
|
||||
int
|
||||
rpcauth_unregister(const struct rpc_authops *ops)
|
||||
|
@ -68,6 +69,7 @@ rpcauth_unregister(const struct rpc_authops *ops)
|
|||
spin_unlock(&rpc_authflavor_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_unregister);
|
||||
|
||||
struct rpc_auth *
|
||||
rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
|
||||
|
@ -102,6 +104,7 @@ rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
|
|||
out:
|
||||
return auth;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_create);
|
||||
|
||||
void
|
||||
rpcauth_release(struct rpc_auth *auth)
|
||||
|
@ -151,6 +154,7 @@ rpcauth_init_credcache(struct rpc_auth *auth)
|
|||
auth->au_credcache = new;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_init_credcache);
|
||||
|
||||
/*
|
||||
* Destroy a list of credentials
|
||||
|
@ -213,6 +217,7 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
|
|||
kfree(cache);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
|
||||
|
||||
/*
|
||||
* Remove stale credentials. Avoid sleeping inside the loop.
|
||||
|
@ -332,6 +337,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
|
|||
out:
|
||||
return cred;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_lookup_credcache);
|
||||
|
||||
struct rpc_cred *
|
||||
rpcauth_lookupcred(struct rpc_auth *auth, int flags)
|
||||
|
@ -350,6 +356,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
|
|||
put_group_info(acred.group_info);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
|
||||
|
||||
void
|
||||
rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
|
||||
|
@ -366,7 +373,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
|
|||
#endif
|
||||
cred->cr_uid = acred->uid;
|
||||
}
|
||||
EXPORT_SYMBOL(rpcauth_init_cred);
|
||||
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
|
||||
|
||||
struct rpc_cred *
|
||||
rpcauth_bindcred(struct rpc_task *task)
|
||||
|
@ -378,6 +385,7 @@ rpcauth_bindcred(struct rpc_task *task)
|
|||
.group_info = current->group_info,
|
||||
};
|
||||
struct rpc_cred *ret;
|
||||
sigset_t oldset;
|
||||
int flags = 0;
|
||||
|
||||
dprintk("RPC: %5u looking up %s cred\n",
|
||||
|
@ -385,7 +393,9 @@ rpcauth_bindcred(struct rpc_task *task)
|
|||
get_group_info(acred.group_info);
|
||||
if (task->tk_flags & RPC_TASK_ROOTCREDS)
|
||||
flags |= RPCAUTH_LOOKUP_ROOTCREDS;
|
||||
rpc_clnt_sigmask(task->tk_client, &oldset);
|
||||
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
|
||||
rpc_clnt_sigunmask(task->tk_client, &oldset);
|
||||
if (!IS_ERR(ret))
|
||||
task->tk_msg.rpc_cred = ret;
|
||||
else
|
||||
|
@ -435,6 +445,7 @@ put_rpccred(struct rpc_cred *cred)
|
|||
out_destroy:
|
||||
cred->cr_ops->crdestroy(cred);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(put_rpccred);
|
||||
|
||||
void
|
||||
rpcauth_unbindcred(struct rpc_task *task)
|
||||
|
|
|
@ -472,16 +472,15 @@ gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
|
|||
char __user *dst, size_t buflen)
|
||||
{
|
||||
char *data = (char *)msg->data + msg->copied;
|
||||
ssize_t mlen = msg->len;
|
||||
ssize_t left;
|
||||
size_t mlen = min(msg->len, buflen);
|
||||
unsigned long left;
|
||||
|
||||
if (mlen > buflen)
|
||||
mlen = buflen;
|
||||
left = copy_to_user(dst, data, mlen);
|
||||
if (left < 0) {
|
||||
msg->errno = left;
|
||||
return left;
|
||||
if (left == mlen) {
|
||||
msg->errno = -EFAULT;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mlen -= left;
|
||||
msg->copied += mlen;
|
||||
msg->errno = 0;
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/smp_lock.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/in6.h>
|
||||
|
||||
#include <linux/sunrpc/clnt.h>
|
||||
#include <linux/sunrpc/rpc_pipe_fs.h>
|
||||
|
@ -121,8 +122,9 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
|
|||
}
|
||||
}
|
||||
|
||||
static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
|
||||
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
|
||||
{
|
||||
struct rpc_program *program = args->program;
|
||||
struct rpc_version *version;
|
||||
struct rpc_clnt *clnt = NULL;
|
||||
struct rpc_auth *auth;
|
||||
|
@ -131,13 +133,13 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
|
|||
|
||||
/* sanity check the name before trying to print it */
|
||||
err = -EINVAL;
|
||||
len = strlen(servname);
|
||||
len = strlen(args->servername);
|
||||
if (len > RPC_MAXNETNAMELEN)
|
||||
goto out_no_rpciod;
|
||||
len++;
|
||||
|
||||
dprintk("RPC: creating %s client for %s (xprt %p)\n",
|
||||
program->name, servname, xprt);
|
||||
program->name, args->servername, xprt);
|
||||
|
||||
err = rpciod_up();
|
||||
if (err)
|
||||
|
@ -145,7 +147,11 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
|
|||
err = -EINVAL;
|
||||
if (!xprt)
|
||||
goto out_no_xprt;
|
||||
if (vers >= program->nrvers || !(version = program->version[vers]))
|
||||
|
||||
if (args->version >= program->nrvers)
|
||||
goto out_err;
|
||||
version = program->version[args->version];
|
||||
if (version == NULL)
|
||||
goto out_err;
|
||||
|
||||
err = -ENOMEM;
|
||||
|
@ -157,12 +163,12 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
|
|||
clnt->cl_server = clnt->cl_inline_name;
|
||||
if (len > sizeof(clnt->cl_inline_name)) {
|
||||
char *buf = kmalloc(len, GFP_KERNEL);
|
||||
if (buf != 0)
|
||||
if (buf != NULL)
|
||||
clnt->cl_server = buf;
|
||||
else
|
||||
len = sizeof(clnt->cl_inline_name);
|
||||
}
|
||||
strlcpy(clnt->cl_server, servname, len);
|
||||
strlcpy(clnt->cl_server, args->servername, len);
|
||||
|
||||
clnt->cl_xprt = xprt;
|
||||
clnt->cl_procinfo = version->procs;
|
||||
|
@ -182,8 +188,15 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
|
|||
if (!xprt_bound(clnt->cl_xprt))
|
||||
clnt->cl_autobind = 1;
|
||||
|
||||
clnt->cl_timeout = xprt->timeout;
|
||||
if (args->timeout != NULL) {
|
||||
memcpy(&clnt->cl_timeout_default, args->timeout,
|
||||
sizeof(clnt->cl_timeout_default));
|
||||
clnt->cl_timeout = &clnt->cl_timeout_default;
|
||||
}
|
||||
|
||||
clnt->cl_rtt = &clnt->cl_rtt_default;
|
||||
rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);
|
||||
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
|
||||
|
||||
kref_init(&clnt->cl_kref);
|
||||
|
||||
|
@ -191,10 +204,10 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
|
|||
if (err < 0)
|
||||
goto out_no_path;
|
||||
|
||||
auth = rpcauth_create(flavor, clnt);
|
||||
auth = rpcauth_create(args->authflavor, clnt);
|
||||
if (IS_ERR(auth)) {
|
||||
printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
|
||||
flavor);
|
||||
args->authflavor);
|
||||
err = PTR_ERR(auth);
|
||||
goto out_no_auth;
|
||||
}
|
||||
|
@ -245,9 +258,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
|
|||
.srcaddr = args->saddress,
|
||||
.dstaddr = args->address,
|
||||
.addrlen = args->addrsize,
|
||||
.timeout = args->timeout
|
||||
};
|
||||
char servername[20];
|
||||
char servername[48];
|
||||
|
||||
xprt = xprt_create_transport(&xprtargs);
|
||||
if (IS_ERR(xprt))
|
||||
|
@ -258,13 +270,34 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
|
|||
* up a string representation of the passed-in address.
|
||||
*/
|
||||
if (args->servername == NULL) {
|
||||
struct sockaddr_in *addr =
|
||||
(struct sockaddr_in *) args->address;
|
||||
snprintf(servername, sizeof(servername), NIPQUAD_FMT,
|
||||
NIPQUAD(addr->sin_addr.s_addr));
|
||||
servername[0] = '\0';
|
||||
switch (args->address->sa_family) {
|
||||
case AF_INET: {
|
||||
struct sockaddr_in *sin =
|
||||
(struct sockaddr_in *)args->address;
|
||||
snprintf(servername, sizeof(servername), NIPQUAD_FMT,
|
||||
NIPQUAD(sin->sin_addr.s_addr));
|
||||
break;
|
||||
}
|
||||
case AF_INET6: {
|
||||
struct sockaddr_in6 *sin =
|
||||
(struct sockaddr_in6 *)args->address;
|
||||
snprintf(servername, sizeof(servername), NIP6_FMT,
|
||||
NIP6(sin->sin6_addr));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
/* caller wants default server name, but
|
||||
* address family isn't recognized. */
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
args->servername = servername;
|
||||
}
|
||||
|
||||
xprt = xprt_create_transport(&xprtargs);
|
||||
if (IS_ERR(xprt))
|
||||
return (struct rpc_clnt *)xprt;
|
||||
|
||||
/*
|
||||
* By default, kernel RPC client connects from a reserved port.
|
||||
* CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
|
||||
|
@ -275,8 +308,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
|
|||
if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
|
||||
xprt->resvport = 0;
|
||||
|
||||
clnt = rpc_new_client(xprt, args->servername, args->program,
|
||||
args->version, args->authflavor);
|
||||
clnt = rpc_new_client(args, xprt);
|
||||
if (IS_ERR(clnt))
|
||||
return clnt;
|
||||
|
||||
|
@ -322,7 +354,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
|
|||
new->cl_autobind = 0;
|
||||
INIT_LIST_HEAD(&new->cl_tasks);
|
||||
spin_lock_init(&new->cl_lock);
|
||||
rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
|
||||
rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
|
||||
new->cl_metrics = rpc_alloc_iostats(clnt);
|
||||
if (new->cl_metrics == NULL)
|
||||
goto out_no_stats;
|
||||
|
@ -345,6 +377,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
|
|||
dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_clone_client);
|
||||
|
||||
/*
|
||||
* Properly shut down an RPC client, terminating all outstanding
|
||||
|
@ -363,6 +396,7 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
|
|||
|
||||
rpc_release_client(clnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
|
||||
|
||||
/*
|
||||
* Free an RPC client
|
||||
|
@ -467,6 +501,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
|
|||
out:
|
||||
return clnt;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
|
||||
|
||||
/*
|
||||
* Default callback for async RPC calls
|
||||
|
@ -498,12 +533,12 @@ static void rpc_save_sigmask(sigset_t *oldset, int intr)
|
|||
sigprocmask(SIG_BLOCK, &sigmask, oldset);
|
||||
}
|
||||
|
||||
static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
|
||||
static void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
|
||||
{
|
||||
rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
|
||||
}
|
||||
|
||||
static inline void rpc_restore_sigmask(sigset_t *oldset)
|
||||
static void rpc_restore_sigmask(sigset_t *oldset)
|
||||
{
|
||||
sigprocmask(SIG_SETMASK, oldset, NULL);
|
||||
}
|
||||
|
@ -512,45 +547,49 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
|
|||
{
|
||||
rpc_save_sigmask(oldset, clnt->cl_intr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
|
||||
|
||||
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
|
||||
{
|
||||
rpc_restore_sigmask(oldset);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
|
||||
|
||||
static
|
||||
struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
|
||||
struct rpc_message *msg,
|
||||
int flags,
|
||||
const struct rpc_call_ops *ops,
|
||||
void *data)
|
||||
/**
|
||||
* rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
|
||||
* @task_setup_data: pointer to task initialisation data
|
||||
*/
|
||||
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
|
||||
{
|
||||
struct rpc_task *task, *ret;
|
||||
sigset_t oldset;
|
||||
|
||||
task = rpc_new_task(clnt, flags, ops, data);
|
||||
task = rpc_new_task(task_setup_data);
|
||||
if (task == NULL) {
|
||||
rpc_release_calldata(ops, data);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
rpc_release_calldata(task_setup_data->callback_ops,
|
||||
task_setup_data->callback_data);
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
|
||||
rpc_task_sigmask(task, &oldset);
|
||||
if (msg != NULL) {
|
||||
rpc_call_setup(task, msg, 0);
|
||||
if (task->tk_status != 0) {
|
||||
ret = ERR_PTR(task->tk_status);
|
||||
rpc_put_task(task);
|
||||
goto out;
|
||||
}
|
||||
if (task->tk_status != 0) {
|
||||
ret = ERR_PTR(task->tk_status);
|
||||
rpc_put_task(task);
|
||||
goto out;
|
||||
}
|
||||
atomic_inc(&task->tk_count);
|
||||
rpc_execute(task);
|
||||
/* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
|
||||
if (!RPC_IS_ASYNC(task)) {
|
||||
rpc_task_sigmask(task, &oldset);
|
||||
rpc_execute(task);
|
||||
rpc_restore_sigmask(&oldset);
|
||||
} else
|
||||
rpc_execute(task);
|
||||
ret = task;
|
||||
out:
|
||||
rpc_restore_sigmask(&oldset);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_run_task);
|
||||
|
||||
/**
|
||||
* rpc_call_sync - Perform a synchronous RPC call
|
||||
|
@ -561,17 +600,24 @@ struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
|
|||
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
|
||||
{
|
||||
struct rpc_task *task;
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = clnt,
|
||||
.rpc_message = msg,
|
||||
.callback_ops = &rpc_default_ops,
|
||||
.flags = flags,
|
||||
};
|
||||
int status;
|
||||
|
||||
BUG_ON(flags & RPC_TASK_ASYNC);
|
||||
|
||||
task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
status = task->tk_status;
|
||||
rpc_put_task(task);
|
||||
return status;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_call_sync);
|
||||
|
||||
/**
|
||||
* rpc_call_async - Perform an asynchronous RPC call
|
||||
|
@ -586,45 +632,28 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
|
|||
const struct rpc_call_ops *tk_ops, void *data)
|
||||
{
|
||||
struct rpc_task *task;
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = clnt,
|
||||
.rpc_message = msg,
|
||||
.callback_ops = tk_ops,
|
||||
.callback_data = data,
|
||||
.flags = flags|RPC_TASK_ASYNC,
|
||||
};
|
||||
|
||||
task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data);
|
||||
task = rpc_run_task(&task_setup_data);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
rpc_put_task(task);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
|
||||
* @clnt: pointer to RPC client
|
||||
* @flags: RPC flags
|
||||
* @ops: RPC call ops
|
||||
* @data: user call data
|
||||
*/
|
||||
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
|
||||
const struct rpc_call_ops *tk_ops,
|
||||
void *data)
|
||||
{
|
||||
return rpc_do_run_task(clnt, NULL, flags, tk_ops, data);
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_run_task);
|
||||
EXPORT_SYMBOL_GPL(rpc_call_async);
|
||||
|
||||
void
|
||||
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
|
||||
rpc_call_start(struct rpc_task *task)
|
||||
{
|
||||
task->tk_msg = *msg;
|
||||
task->tk_flags |= flags;
|
||||
/* Bind the user cred */
|
||||
if (task->tk_msg.rpc_cred != NULL)
|
||||
rpcauth_holdcred(task);
|
||||
else
|
||||
rpcauth_bindcred(task);
|
||||
|
||||
if (task->tk_status == 0)
|
||||
task->tk_action = call_start;
|
||||
else
|
||||
task->tk_action = rpc_exit_task;
|
||||
task->tk_action = call_start;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_call_start);
|
||||
|
||||
/**
|
||||
* rpc_peeraddr - extract remote peer address from clnt's xprt
|
||||
|
@ -653,7 +682,8 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr);
|
|||
* @format: address format
|
||||
*
|
||||
*/
|
||||
char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
|
||||
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
|
||||
enum rpc_display_format_t format)
|
||||
{
|
||||
struct rpc_xprt *xprt = clnt->cl_xprt;
|
||||
|
||||
|
@ -671,6 +701,7 @@ rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize
|
|||
if (xprt->ops->set_buffer_size)
|
||||
xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_setbufsize);
|
||||
|
||||
/*
|
||||
* Return size of largest payload RPC client can support, in bytes
|
||||
|
@ -710,6 +741,7 @@ rpc_restart_call(struct rpc_task *task)
|
|||
|
||||
task->tk_action = call_start;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_restart_call);
|
||||
|
||||
/*
|
||||
* 0. Initial state
|
||||
|
@ -1137,7 +1169,7 @@ call_status(struct rpc_task *task)
|
|||
case -ETIMEDOUT:
|
||||
task->tk_action = call_timeout;
|
||||
if (task->tk_client->cl_discrtry)
|
||||
xprt_disconnect(task->tk_xprt);
|
||||
xprt_force_disconnect(task->tk_xprt);
|
||||
break;
|
||||
case -ECONNREFUSED:
|
||||
case -ENOTCONN:
|
||||
|
@ -1260,7 +1292,7 @@ call_decode(struct rpc_task *task)
|
|||
req->rq_received = req->rq_private_buf.len = 0;
|
||||
task->tk_status = 0;
|
||||
if (task->tk_client->cl_discrtry)
|
||||
xprt_disconnect(task->tk_xprt);
|
||||
xprt_force_disconnect(task->tk_xprt);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1517,9 +1549,15 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int
|
|||
.rpc_proc = &rpcproc_null,
|
||||
.rpc_cred = cred,
|
||||
};
|
||||
return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL);
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = clnt,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &rpc_default_ops,
|
||||
.flags = flags,
|
||||
};
|
||||
return rpc_run_task(&task_setup_data);
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_call_null);
|
||||
EXPORT_SYMBOL_GPL(rpc_call_null);
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
void rpc_show_tasks(void)
|
||||
|
|
|
@ -76,6 +76,16 @@ rpc_timeout_upcall_queue(struct work_struct *work)
|
|||
rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
|
||||
}
|
||||
|
||||
/**
|
||||
* rpc_queue_upcall
|
||||
* @inode: inode of upcall pipe on which to queue given message
|
||||
* @msg: message to queue
|
||||
*
|
||||
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
|
||||
* A userspace process may then later read the upcall by performing a
|
||||
* read on an open file for this inode. It is up to the caller to
|
||||
* initialize the fields of @msg (other than @msg->list) appropriately.
|
||||
*/
|
||||
int
|
||||
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
|
||||
{
|
||||
|
@ -103,6 +113,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
|
|||
wake_up(&rpci->waitq);
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_queue_upcall);
|
||||
|
||||
static inline void
|
||||
rpc_inode_setowner(struct inode *inode, void *private)
|
||||
|
@ -512,8 +523,8 @@ rpc_get_inode(struct super_block *sb, int mode)
|
|||
/*
|
||||
* FIXME: This probably has races.
|
||||
*/
|
||||
static void
|
||||
rpc_depopulate(struct dentry *parent, int start, int eof)
|
||||
static void rpc_depopulate(struct dentry *parent,
|
||||
unsigned long start, unsigned long eof)
|
||||
{
|
||||
struct inode *dir = parent->d_inode;
|
||||
struct list_head *pos, *next;
|
||||
|
@ -663,7 +674,16 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
|
|||
return dentry;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* rpc_mkdir - Create a new directory in rpc_pipefs
|
||||
* @path: path from the rpc_pipefs root to the new directory
|
||||
* @rpc_clnt: rpc client to associate with this directory
|
||||
*
|
||||
* This creates a directory at the given @path associated with
|
||||
* @rpc_clnt, which will contain a file named "info" with some basic
|
||||
* information about the client, together with any "pipes" that may
|
||||
* later be created using rpc_mkpipe().
|
||||
*/
|
||||
struct dentry *
|
||||
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
|
||||
{
|
||||
|
@ -699,6 +719,10 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
|
|||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
* rpc_rmdir - Remove a directory created with rpc_mkdir()
|
||||
* @dentry: directory to remove
|
||||
*/
|
||||
int
|
||||
rpc_rmdir(struct dentry *dentry)
|
||||
{
|
||||
|
@ -717,6 +741,25 @@ rpc_rmdir(struct dentry *dentry)
|
|||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication
|
||||
* @parent: dentry of directory to create new "pipe" in
|
||||
* @name: name of pipe
|
||||
* @private: private data to associate with the pipe, for the caller's use
|
||||
* @ops: operations defining the behavior of the pipe: upcall, downcall,
|
||||
* release_pipe, and destroy_msg.
|
||||
*
|
||||
* Data is made available for userspace to read by calls to
|
||||
* rpc_queue_upcall(). The actual reads will result in calls to
|
||||
* @ops->upcall, which will be called with the file pointer,
|
||||
* message, and userspace buffer to copy to.
|
||||
*
|
||||
* Writes can come at any time, and do not necessarily have to be
|
||||
* responses to upcalls. They will result in calls to @msg->downcall.
|
||||
*
|
||||
* The @private argument passed here will be available to all these methods
|
||||
* from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
|
||||
*/
|
||||
struct dentry *
|
||||
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
|
||||
{
|
||||
|
@ -763,7 +806,16 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
|
|||
-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_mkpipe);
|
||||
|
||||
/**
|
||||
* rpc_unlink - remove a pipe
|
||||
* @dentry: dentry for the pipe, as returned from rpc_mkpipe
|
||||
*
|
||||
* After this call, lookups will no longer find the pipe, and any
|
||||
* attempts to read or write using preexisting opens of the pipe will
|
||||
* return -EPIPE.
|
||||
*/
|
||||
int
|
||||
rpc_unlink(struct dentry *dentry)
|
||||
{
|
||||
|
@ -785,6 +837,7 @@ rpc_unlink(struct dentry *dentry)
|
|||
dput(parent);
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_unlink);
|
||||
|
||||
/*
|
||||
* populate the filesystem
|
||||
|
|
|
@ -54,45 +54,6 @@ enum {
|
|||
#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
|
||||
#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
|
||||
|
||||
/*
|
||||
* r_addr
|
||||
*
|
||||
* Quoting RFC 3530, section 2.2:
|
||||
*
|
||||
* For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the
|
||||
* US-ASCII string:
|
||||
*
|
||||
* h1.h2.h3.h4.p1.p2
|
||||
*
|
||||
* The prefix, "h1.h2.h3.h4", is the standard textual form for
|
||||
* representing an IPv4 address, which is always four octets long.
|
||||
* Assuming big-endian ordering, h1, h2, h3, and h4, are respectively,
|
||||
* the first through fourth octets each converted to ASCII-decimal.
|
||||
* Assuming big-endian ordering, p1 and p2 are, respectively, the first
|
||||
* and second octets each converted to ASCII-decimal. For example, if a
|
||||
* host, in big-endian order, has an address of 0x0A010307 and there is
|
||||
* a service listening on, in big endian order, port 0x020F (decimal
|
||||
* 527), then the complete universal address is "10.1.3.7.2.15".
|
||||
*
|
||||
* ...
|
||||
*
|
||||
* For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the
|
||||
* US-ASCII string:
|
||||
*
|
||||
* x1:x2:x3:x4:x5:x6:x7:x8.p1.p2
|
||||
*
|
||||
* The suffix "p1.p2" is the service port, and is computed the same way
|
||||
* as with universal addresses for TCP and UDP over IPv4. The prefix,
|
||||
* "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for
|
||||
* representing an IPv6 address as defined in Section 2.2 of [RFC2373].
|
||||
* Additionally, the two alternative forms specified in Section 2.2 of
|
||||
* [RFC2373] are also acceptable.
|
||||
*
|
||||
* XXX: Currently this implementation does not explicitly convert the
|
||||
* stored address to US-ASCII on non-ASCII systems.
|
||||
*/
|
||||
#define RPCB_MAXADDRLEN (128u)
|
||||
|
||||
/*
|
||||
* r_owner
|
||||
*
|
||||
|
@ -112,9 +73,9 @@ struct rpcbind_args {
|
|||
u32 r_vers;
|
||||
u32 r_prot;
|
||||
unsigned short r_port;
|
||||
char * r_netid;
|
||||
char r_addr[RPCB_MAXADDRLEN];
|
||||
char * r_owner;
|
||||
const char * r_netid;
|
||||
const char * r_addr;
|
||||
const char * r_owner;
|
||||
};
|
||||
|
||||
static struct rpc_procinfo rpcb_procedures2[];
|
||||
|
@ -128,19 +89,6 @@ struct rpcb_info {
|
|||
static struct rpcb_info rpcb_next_version[];
|
||||
static struct rpcb_info rpcb_next_version6[];
|
||||
|
||||
static void rpcb_getport_prepare(struct rpc_task *task, void *calldata)
|
||||
{
|
||||
struct rpcbind_args *map = calldata;
|
||||
struct rpc_xprt *xprt = map->r_xprt;
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = rpcb_next_version[xprt->bind_index].rpc_proc,
|
||||
.rpc_argp = map,
|
||||
.rpc_resp = &map->r_port,
|
||||
};
|
||||
|
||||
rpc_call_setup(task, &msg, 0);
|
||||
}
|
||||
|
||||
static void rpcb_map_release(void *data)
|
||||
{
|
||||
struct rpcbind_args *map = data;
|
||||
|
@ -150,7 +98,6 @@ static void rpcb_map_release(void *data)
|
|||
}
|
||||
|
||||
static const struct rpc_call_ops rpcb_getport_ops = {
|
||||
.rpc_call_prepare = rpcb_getport_prepare,
|
||||
.rpc_call_done = rpcb_getport_done,
|
||||
.rpc_release = rpcb_map_release,
|
||||
};
|
||||
|
@ -162,12 +109,13 @@ static void rpcb_wake_rpcbind_waiters(struct rpc_xprt *xprt, int status)
|
|||
}
|
||||
|
||||
static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
|
||||
int proto, int version, int privileged)
|
||||
size_t salen, int proto, u32 version,
|
||||
int privileged)
|
||||
{
|
||||
struct rpc_create_args args = {
|
||||
.protocol = proto,
|
||||
.address = srvaddr,
|
||||
.addrsize = sizeof(struct sockaddr_in),
|
||||
.addrsize = salen,
|
||||
.servername = hostname,
|
||||
.program = &rpcb_program,
|
||||
.version = version,
|
||||
|
@ -230,7 +178,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
|
|||
prog, vers, prot, port);
|
||||
|
||||
rpcb_clnt = rpcb_create("localhost", (struct sockaddr *) &sin,
|
||||
XPRT_TRANSPORT_UDP, 2, 1);
|
||||
sizeof(sin), XPRT_TRANSPORT_UDP, 2, 1);
|
||||
if (IS_ERR(rpcb_clnt))
|
||||
return PTR_ERR(rpcb_clnt);
|
||||
|
||||
|
@ -252,13 +200,15 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
|
|||
* @vers: RPC version number to bind
|
||||
* @prot: transport protocol to use to make this request
|
||||
*
|
||||
* Return value is the requested advertised port number,
|
||||
* or a negative errno value.
|
||||
*
|
||||
* Called from outside the RPC client in a synchronous task context.
|
||||
* Uses default timeout parameters specified by underlying transport.
|
||||
*
|
||||
* XXX: Needs to support IPv6, and rpcbind versions 3 and 4
|
||||
* XXX: Needs to support IPv6
|
||||
*/
|
||||
int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog,
|
||||
__u32 vers, int prot)
|
||||
int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
|
||||
{
|
||||
struct rpcbind_args map = {
|
||||
.r_prog = prog,
|
||||
|
@ -272,14 +222,13 @@ int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog,
|
|||
.rpc_resp = &map.r_port,
|
||||
};
|
||||
struct rpc_clnt *rpcb_clnt;
|
||||
char hostname[40];
|
||||
int status;
|
||||
|
||||
dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n",
|
||||
__FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
|
||||
|
||||
sprintf(hostname, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
|
||||
rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0);
|
||||
rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin,
|
||||
sizeof(*sin), prot, 2, 0);
|
||||
if (IS_ERR(rpcb_clnt))
|
||||
return PTR_ERR(rpcb_clnt);
|
||||
|
||||
|
@ -295,6 +244,24 @@ int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(rpcb_getport_sync);
|
||||
|
||||
static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, int version)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = rpcb_next_version[version].rpc_proc,
|
||||
.rpc_argp = map,
|
||||
.rpc_resp = &map->r_port,
|
||||
};
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.rpc_client = rpcb_clnt,
|
||||
.rpc_message = &msg,
|
||||
.callback_ops = &rpcb_getport_ops,
|
||||
.callback_data = map,
|
||||
.flags = RPC_TASK_ASYNC,
|
||||
};
|
||||
|
||||
return rpc_run_task(&task_setup_data);
|
||||
}
|
||||
|
||||
/**
|
||||
* rpcb_getport_async - obtain the port for a given RPC service on a given host
|
||||
* @task: task that is waiting for portmapper request
|
||||
|
@ -305,12 +272,14 @@ EXPORT_SYMBOL_GPL(rpcb_getport_sync);
|
|||
void rpcb_getport_async(struct rpc_task *task)
|
||||
{
|
||||
struct rpc_clnt *clnt = task->tk_client;
|
||||
int bind_version;
|
||||
u32 bind_version;
|
||||
struct rpc_xprt *xprt = task->tk_xprt;
|
||||
struct rpc_clnt *rpcb_clnt;
|
||||
static struct rpcbind_args *map;
|
||||
struct rpc_task *child;
|
||||
struct sockaddr addr;
|
||||
struct sockaddr_storage addr;
|
||||
struct sockaddr *sap = (struct sockaddr *)&addr;
|
||||
size_t salen;
|
||||
int status;
|
||||
struct rpcb_info *info;
|
||||
|
||||
|
@ -340,10 +309,10 @@ void rpcb_getport_async(struct rpc_task *task)
|
|||
goto bailout_nofree;
|
||||
}
|
||||
|
||||
rpc_peeraddr(clnt, (void *)&addr, sizeof(addr));
|
||||
salen = rpc_peeraddr(clnt, sap, sizeof(addr));
|
||||
|
||||
/* Don't ever use rpcbind v2 for AF_INET6 requests */
|
||||
switch (addr.sa_family) {
|
||||
switch (sap->sa_family) {
|
||||
case AF_INET:
|
||||
info = rpcb_next_version;
|
||||
break;
|
||||
|
@ -368,7 +337,7 @@ void rpcb_getport_async(struct rpc_task *task)
|
|||
dprintk("RPC: %5u %s: trying rpcbind version %u\n",
|
||||
task->tk_pid, __FUNCTION__, bind_version);
|
||||
|
||||
rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot,
|
||||
rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot,
|
||||
bind_version, 0);
|
||||
if (IS_ERR(rpcb_clnt)) {
|
||||
status = PTR_ERR(rpcb_clnt);
|
||||
|
@ -390,12 +359,10 @@ void rpcb_getport_async(struct rpc_task *task)
|
|||
map->r_port = 0;
|
||||
map->r_xprt = xprt_get(xprt);
|
||||
map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
|
||||
memcpy(map->r_addr,
|
||||
rpc_peeraddr2str(rpcb_clnt, RPC_DISPLAY_UNIVERSAL_ADDR),
|
||||
sizeof(map->r_addr));
|
||||
map->r_addr = rpc_peeraddr2str(rpcb_clnt, RPC_DISPLAY_UNIVERSAL_ADDR);
|
||||
map->r_owner = RPCB_OWNER_STRING; /* ignored for GETADDR */
|
||||
|
||||
child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map);
|
||||
child = rpcb_call_async(rpcb_clnt, map, xprt->bind_index);
|
||||
rpc_release_client(rpcb_clnt);
|
||||
if (IS_ERR(child)) {
|
||||
status = -EIO;
|
||||
|
@ -518,7 +485,7 @@ static int rpcb_decode_getaddr(struct rpc_rqst *req, __be32 *p,
|
|||
* Simple sanity check. The smallest possible universal
|
||||
* address is an IPv4 address string containing 11 bytes.
|
||||
*/
|
||||
if (addr_len < 11 || addr_len > RPCB_MAXADDRLEN)
|
||||
if (addr_len < 11 || addr_len > RPCBIND_MAXUADDRLEN)
|
||||
goto out_err;
|
||||
|
||||
/*
|
||||
|
@ -569,7 +536,7 @@ static int rpcb_decode_getaddr(struct rpc_rqst *req, __be32 *p,
|
|||
#define RPCB_boolean_sz (1u)
|
||||
|
||||
#define RPCB_netid_sz (1+XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
|
||||
#define RPCB_addr_sz (1+XDR_QUADLEN(RPCB_MAXADDRLEN))
|
||||
#define RPCB_addr_sz (1+XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
|
||||
#define RPCB_ownerstring_sz (1+XDR_QUADLEN(RPCB_MAXOWNERLEN))
|
||||
|
||||
#define RPCB_mappingargs_sz RPCB_program_sz+RPCB_version_sz+ \
|
||||
|
|
|
@ -45,7 +45,7 @@ static void rpc_release_task(struct rpc_task *task);
|
|||
/*
|
||||
* RPC tasks sit here while waiting for conditions to improve.
|
||||
*/
|
||||
static RPC_WAITQ(delay_queue, "delayq");
|
||||
static struct rpc_wait_queue delay_queue;
|
||||
|
||||
/*
|
||||
* rpciod-related stuff
|
||||
|
@ -135,7 +135,7 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
|
|||
if (unlikely(task->tk_priority > queue->maxpriority))
|
||||
q = &queue->tasks[queue->maxpriority];
|
||||
list_for_each_entry(t, q, u.tk_wait.list) {
|
||||
if (t->tk_cookie == task->tk_cookie) {
|
||||
if (t->tk_owner == task->tk_owner) {
|
||||
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
|
||||
return;
|
||||
}
|
||||
|
@ -208,26 +208,26 @@ static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int
|
|||
queue->count = 1 << (priority * 2);
|
||||
}
|
||||
|
||||
static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
|
||||
static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
|
||||
{
|
||||
queue->cookie = cookie;
|
||||
queue->owner = pid;
|
||||
queue->nr = RPC_BATCH_COUNT;
|
||||
}
|
||||
|
||||
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
|
||||
{
|
||||
rpc_set_waitqueue_priority(queue, queue->maxpriority);
|
||||
rpc_set_waitqueue_cookie(queue, 0);
|
||||
rpc_set_waitqueue_owner(queue, 0);
|
||||
}
|
||||
|
||||
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
|
||||
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
|
||||
{
|
||||
int i;
|
||||
|
||||
spin_lock_init(&queue->lock);
|
||||
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
|
||||
INIT_LIST_HEAD(&queue->tasks[i]);
|
||||
queue->maxpriority = maxprio;
|
||||
queue->maxpriority = nr_queues - 1;
|
||||
rpc_reset_waitqueue_priority(queue);
|
||||
#ifdef RPC_DEBUG
|
||||
queue->name = qname;
|
||||
|
@ -236,14 +236,14 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
|
|||
|
||||
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
|
||||
{
|
||||
__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
|
||||
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
|
||||
}
|
||||
|
||||
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
|
||||
{
|
||||
__rpc_init_priority_wait_queue(queue, qname, 0);
|
||||
__rpc_init_priority_wait_queue(queue, qname, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_init_wait_queue);
|
||||
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
|
||||
|
||||
static int rpc_wait_bit_interruptible(void *word)
|
||||
{
|
||||
|
@ -303,7 +303,7 @@ int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
|
|||
return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
|
||||
action, TASK_INTERRUPTIBLE);
|
||||
}
|
||||
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
|
||||
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
|
||||
|
||||
/*
|
||||
* Make an RPC task runnable.
|
||||
|
@ -373,6 +373,7 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
|
|||
__rpc_sleep_on(q, task, action, timer);
|
||||
spin_unlock_bh(&q->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_sleep_on);
|
||||
|
||||
/**
|
||||
* __rpc_do_wake_up_task - wake up a single rpc_task
|
||||
|
@ -444,6 +445,7 @@ void rpc_wake_up_task(struct rpc_task *task)
|
|||
}
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_wake_up_task);
|
||||
|
||||
/*
|
||||
* Wake up the next task on a priority queue.
|
||||
|
@ -454,12 +456,12 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
|
|||
struct rpc_task *task;
|
||||
|
||||
/*
|
||||
* Service a batch of tasks from a single cookie.
|
||||
* Service a batch of tasks from a single owner.
|
||||
*/
|
||||
q = &queue->tasks[queue->priority];
|
||||
if (!list_empty(q)) {
|
||||
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
|
||||
if (queue->cookie == task->tk_cookie) {
|
||||
if (queue->owner == task->tk_owner) {
|
||||
if (--queue->nr)
|
||||
goto out;
|
||||
list_move_tail(&task->u.tk_wait.list, q);
|
||||
|
@ -468,7 +470,7 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
|
|||
* Check if we need to switch queues.
|
||||
*/
|
||||
if (--queue->count)
|
||||
goto new_cookie;
|
||||
goto new_owner;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -490,8 +492,8 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
|
|||
|
||||
new_queue:
|
||||
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
|
||||
new_cookie:
|
||||
rpc_set_waitqueue_cookie(queue, task->tk_cookie);
|
||||
new_owner:
|
||||
rpc_set_waitqueue_owner(queue, task->tk_owner);
|
||||
out:
|
||||
__rpc_wake_up_task(task);
|
||||
return task;
|
||||
|
@ -519,6 +521,7 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
|
|||
|
||||
return task;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
|
||||
|
||||
/**
|
||||
* rpc_wake_up - wake up all rpc_tasks
|
||||
|
@ -544,6 +547,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
|
|||
spin_unlock(&queue->lock);
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_wake_up);
|
||||
|
||||
/**
|
||||
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
|
||||
|
@ -572,6 +576,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
|
|||
spin_unlock(&queue->lock);
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
|
||||
|
||||
static void __rpc_atrun(struct rpc_task *task)
|
||||
{
|
||||
|
@ -586,6 +591,7 @@ void rpc_delay(struct rpc_task *task, unsigned long delay)
|
|||
task->tk_timeout = delay;
|
||||
rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_delay);
|
||||
|
||||
/*
|
||||
* Helper to call task->tk_ops->rpc_call_prepare
|
||||
|
@ -614,7 +620,7 @@ void rpc_exit_task(struct rpc_task *task)
|
|||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_exit_task);
|
||||
EXPORT_SYMBOL_GPL(rpc_exit_task);
|
||||
|
||||
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
|
||||
{
|
||||
|
@ -808,39 +814,49 @@ EXPORT_SYMBOL_GPL(rpc_free);
|
|||
/*
|
||||
* Creation and deletion of RPC task structures
|
||||
*/
|
||||
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
|
||||
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
|
||||
{
|
||||
memset(task, 0, sizeof(*task));
|
||||
setup_timer(&task->tk_timer, (void (*)(unsigned long))rpc_run_timer,
|
||||
(unsigned long)task);
|
||||
atomic_set(&task->tk_count, 1);
|
||||
task->tk_client = clnt;
|
||||
task->tk_flags = flags;
|
||||
task->tk_ops = tk_ops;
|
||||
if (tk_ops->rpc_call_prepare != NULL)
|
||||
task->tk_action = rpc_prepare_task;
|
||||
task->tk_calldata = calldata;
|
||||
task->tk_flags = task_setup_data->flags;
|
||||
task->tk_ops = task_setup_data->callback_ops;
|
||||
task->tk_calldata = task_setup_data->callback_data;
|
||||
INIT_LIST_HEAD(&task->tk_task);
|
||||
|
||||
/* Initialize retry counters */
|
||||
task->tk_garb_retry = 2;
|
||||
task->tk_cred_retry = 2;
|
||||
|
||||
task->tk_priority = RPC_PRIORITY_NORMAL;
|
||||
task->tk_cookie = (unsigned long)current;
|
||||
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
|
||||
task->tk_owner = current->tgid;
|
||||
|
||||
/* Initialize workqueue for async tasks */
|
||||
task->tk_workqueue = rpciod_workqueue;
|
||||
|
||||
if (clnt) {
|
||||
kref_get(&clnt->cl_kref);
|
||||
if (clnt->cl_softrtry)
|
||||
task->tk_client = task_setup_data->rpc_client;
|
||||
if (task->tk_client != NULL) {
|
||||
kref_get(&task->tk_client->cl_kref);
|
||||
if (task->tk_client->cl_softrtry)
|
||||
task->tk_flags |= RPC_TASK_SOFT;
|
||||
if (!clnt->cl_intr)
|
||||
if (!task->tk_client->cl_intr)
|
||||
task->tk_flags |= RPC_TASK_NOINTR;
|
||||
}
|
||||
|
||||
BUG_ON(task->tk_ops == NULL);
|
||||
if (task->tk_ops->rpc_call_prepare != NULL)
|
||||
task->tk_action = rpc_prepare_task;
|
||||
|
||||
if (task_setup_data->rpc_message != NULL) {
|
||||
memcpy(&task->tk_msg, task_setup_data->rpc_message, sizeof(task->tk_msg));
|
||||
/* Bind the user cred */
|
||||
if (task->tk_msg.rpc_cred != NULL)
|
||||
rpcauth_holdcred(task);
|
||||
else
|
||||
rpcauth_bindcred(task);
|
||||
if (task->tk_action == NULL)
|
||||
rpc_call_start(task);
|
||||
}
|
||||
|
||||
/* starting timestamp */
|
||||
task->tk_start = jiffies;
|
||||
|
@ -865,18 +881,22 @@ static void rpc_free_task(struct rcu_head *rcu)
|
|||
/*
|
||||
* Create a new task for the specified client.
|
||||
*/
|
||||
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
|
||||
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
|
||||
{
|
||||
struct rpc_task *task;
|
||||
struct rpc_task *task = setup_data->task;
|
||||
unsigned short flags = 0;
|
||||
|
||||
task = rpc_alloc_task();
|
||||
if (!task)
|
||||
goto out;
|
||||
if (task == NULL) {
|
||||
task = rpc_alloc_task();
|
||||
if (task == NULL)
|
||||
goto out;
|
||||
flags = RPC_TASK_DYNAMIC;
|
||||
}
|
||||
|
||||
rpc_init_task(task, clnt, flags, tk_ops, calldata);
|
||||
rpc_init_task(task, setup_data);
|
||||
|
||||
task->tk_flags |= flags;
|
||||
dprintk("RPC: allocated task %p\n", task);
|
||||
task->tk_flags |= RPC_TASK_DYNAMIC;
|
||||
out:
|
||||
return task;
|
||||
}
|
||||
|
@ -902,7 +922,7 @@ void rpc_put_task(struct rpc_task *task)
|
|||
call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
|
||||
rpc_release_calldata(tk_ops, calldata);
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_put_task);
|
||||
EXPORT_SYMBOL_GPL(rpc_put_task);
|
||||
|
||||
static void rpc_release_task(struct rpc_task *task)
|
||||
{
|
||||
|
@ -959,6 +979,7 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
|
|||
}
|
||||
spin_unlock(&clnt->cl_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
|
||||
|
||||
int rpciod_up(void)
|
||||
{
|
||||
|
@ -1038,6 +1059,11 @@ rpc_init_mempool(void)
|
|||
goto err_nomem;
|
||||
if (!rpciod_start())
|
||||
goto err_nomem;
|
||||
/*
|
||||
* The following is not strictly a mempool initialisation,
|
||||
* but there is no harm in doing it here
|
||||
*/
|
||||
rpc_init_wait_queue(&delay_queue, "delayq");
|
||||
return 0;
|
||||
err_nomem:
|
||||
rpc_destroy_mempool();
|
||||
|
|
|
@ -72,7 +72,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
|
|||
struct page **ppage = xdr->pages;
|
||||
unsigned int len, pglen = xdr->page_len;
|
||||
ssize_t copied = 0;
|
||||
int ret;
|
||||
size_t ret;
|
||||
|
||||
len = xdr->head[0].iov_len;
|
||||
if (base < len) {
|
||||
|
|
|
@ -118,7 +118,7 @@ struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
|
|||
new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
|
||||
return new;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_alloc_iostats);
|
||||
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
|
||||
|
||||
/**
|
||||
* rpc_free_iostats - release an rpc_iostats structure
|
||||
|
@ -129,7 +129,7 @@ void rpc_free_iostats(struct rpc_iostats *stats)
|
|||
{
|
||||
kfree(stats);
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_free_iostats);
|
||||
EXPORT_SYMBOL_GPL(rpc_free_iostats);
|
||||
|
||||
/**
|
||||
* rpc_count_iostats - tally up per-task stats
|
||||
|
@ -215,7 +215,7 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
|
|||
metrics->om_execute * MILLISECS_PER_JIFFY);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_print_iostats);
|
||||
EXPORT_SYMBOL_GPL(rpc_print_iostats);
|
||||
|
||||
/*
|
||||
* Register/unregister RPC proc files
|
||||
|
@ -241,12 +241,14 @@ rpc_proc_register(struct rpc_stat *statp)
|
|||
{
|
||||
return do_register(statp->program->name, statp, &rpc_proc_fops);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_proc_register);
|
||||
|
||||
void
|
||||
rpc_proc_unregister(const char *name)
|
||||
{
|
||||
remove_proc_entry(name, proc_net_rpc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_proc_unregister);
|
||||
|
||||
struct proc_dir_entry *
|
||||
svc_proc_register(struct svc_stat *statp, const struct file_operations *fops)
|
||||
|
|
|
@ -22,45 +22,6 @@
|
|||
#include <linux/sunrpc/rpc_pipe_fs.h>
|
||||
#include <linux/sunrpc/xprtsock.h>
|
||||
|
||||
/* RPC scheduler */
|
||||
EXPORT_SYMBOL(rpc_execute);
|
||||
EXPORT_SYMBOL(rpc_init_task);
|
||||
EXPORT_SYMBOL(rpc_sleep_on);
|
||||
EXPORT_SYMBOL(rpc_wake_up_next);
|
||||
EXPORT_SYMBOL(rpc_wake_up_task);
|
||||
EXPORT_SYMBOL(rpc_wake_up_status);
|
||||
|
||||
/* RPC client functions */
|
||||
EXPORT_SYMBOL(rpc_clone_client);
|
||||
EXPORT_SYMBOL(rpc_bind_new_program);
|
||||
EXPORT_SYMBOL(rpc_shutdown_client);
|
||||
EXPORT_SYMBOL(rpc_killall_tasks);
|
||||
EXPORT_SYMBOL(rpc_call_sync);
|
||||
EXPORT_SYMBOL(rpc_call_async);
|
||||
EXPORT_SYMBOL(rpc_call_setup);
|
||||
EXPORT_SYMBOL(rpc_clnt_sigmask);
|
||||
EXPORT_SYMBOL(rpc_clnt_sigunmask);
|
||||
EXPORT_SYMBOL(rpc_delay);
|
||||
EXPORT_SYMBOL(rpc_restart_call);
|
||||
EXPORT_SYMBOL(rpc_setbufsize);
|
||||
EXPORT_SYMBOL(rpc_unlink);
|
||||
EXPORT_SYMBOL(rpc_wake_up);
|
||||
EXPORT_SYMBOL(rpc_queue_upcall);
|
||||
EXPORT_SYMBOL(rpc_mkpipe);
|
||||
|
||||
/* Client transport */
|
||||
EXPORT_SYMBOL(xprt_set_timeout);
|
||||
|
||||
/* Client credential cache */
|
||||
EXPORT_SYMBOL(rpcauth_register);
|
||||
EXPORT_SYMBOL(rpcauth_unregister);
|
||||
EXPORT_SYMBOL(rpcauth_create);
|
||||
EXPORT_SYMBOL(rpcauth_lookupcred);
|
||||
EXPORT_SYMBOL(rpcauth_lookup_credcache);
|
||||
EXPORT_SYMBOL(rpcauth_destroy_credcache);
|
||||
EXPORT_SYMBOL(rpcauth_init_credcache);
|
||||
EXPORT_SYMBOL(put_rpccred);
|
||||
|
||||
/* RPC server stuff */
|
||||
EXPORT_SYMBOL(svc_create);
|
||||
EXPORT_SYMBOL(svc_create_thread);
|
||||
|
@ -81,8 +42,6 @@ EXPORT_SYMBOL(svc_set_client);
|
|||
|
||||
/* RPC statistics */
|
||||
#ifdef CONFIG_PROC_FS
|
||||
EXPORT_SYMBOL(rpc_proc_register);
|
||||
EXPORT_SYMBOL(rpc_proc_unregister);
|
||||
EXPORT_SYMBOL(svc_proc_register);
|
||||
EXPORT_SYMBOL(svc_proc_unregister);
|
||||
EXPORT_SYMBOL(svc_seq_show);
|
||||
|
@ -105,31 +64,6 @@ EXPORT_SYMBOL(qword_get);
|
|||
EXPORT_SYMBOL(svcauth_unix_purge);
|
||||
EXPORT_SYMBOL(unix_domain_find);
|
||||
|
||||
/* Generic XDR */
|
||||
EXPORT_SYMBOL(xdr_encode_string);
|
||||
EXPORT_SYMBOL(xdr_decode_string_inplace);
|
||||
EXPORT_SYMBOL(xdr_decode_netobj);
|
||||
EXPORT_SYMBOL(xdr_encode_netobj);
|
||||
EXPORT_SYMBOL(xdr_encode_pages);
|
||||
EXPORT_SYMBOL(xdr_inline_pages);
|
||||
EXPORT_SYMBOL(xdr_shift_buf);
|
||||
EXPORT_SYMBOL(xdr_encode_word);
|
||||
EXPORT_SYMBOL(xdr_decode_word);
|
||||
EXPORT_SYMBOL(xdr_encode_array2);
|
||||
EXPORT_SYMBOL(xdr_decode_array2);
|
||||
EXPORT_SYMBOL(xdr_buf_from_iov);
|
||||
EXPORT_SYMBOL(xdr_buf_subsegment);
|
||||
EXPORT_SYMBOL(xdr_buf_read_netobj);
|
||||
EXPORT_SYMBOL(read_bytes_from_xdr_buf);
|
||||
|
||||
/* Debugging symbols */
|
||||
#ifdef RPC_DEBUG
|
||||
EXPORT_SYMBOL(rpc_debug);
|
||||
EXPORT_SYMBOL(nfs_debug);
|
||||
EXPORT_SYMBOL(nfsd_debug);
|
||||
EXPORT_SYMBOL(nlm_debug);
|
||||
#endif
|
||||
|
||||
extern struct cache_detail ip_map_cache, unix_gid_cache;
|
||||
|
||||
static int __init
|
||||
|
|
|
@ -23,9 +23,16 @@
|
|||
* Declare the debug flags here
|
||||
*/
|
||||
unsigned int rpc_debug;
|
||||
EXPORT_SYMBOL_GPL(rpc_debug);
|
||||
|
||||
unsigned int nfs_debug;
|
||||
EXPORT_SYMBOL_GPL(nfs_debug);
|
||||
|
||||
unsigned int nfsd_debug;
|
||||
EXPORT_SYMBOL_GPL(nfsd_debug);
|
||||
|
||||
unsigned int nlm_debug;
|
||||
EXPORT_SYMBOL_GPL(nlm_debug);
|
||||
|
||||
#ifdef RPC_DEBUG
|
||||
|
||||
|
|
|
@@ -28,6 +28,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
         memcpy(p, obj->data, obj->len);
         return p + XDR_QUADLEN(obj->len);
 }
+EXPORT_SYMBOL(xdr_encode_netobj);
 
 __be32 *
 xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
@@ -40,6 +41,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
         obj->data = (u8 *) p;
         return p + XDR_QUADLEN(len);
 }
+EXPORT_SYMBOL(xdr_decode_netobj);
 
 /**
  * xdr_encode_opaque_fixed - Encode fixed length opaque data
@@ -91,6 +93,7 @@ xdr_encode_string(__be32 *p, const char *string)
 {
         return xdr_encode_array(p, string, strlen(string));
 }
+EXPORT_SYMBOL(xdr_encode_string);
 
 __be32 *
 xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
@@ -103,6 +106,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
         *sp = (char *) p;
         return p + XDR_QUADLEN(len);
 }
+EXPORT_SYMBOL(xdr_decode_string_inplace);
 
 void
 xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
@@ -130,6 +134,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
         xdr->buflen += len;
         xdr->len += len;
 }
+EXPORT_SYMBOL(xdr_encode_pages);
 
 void
 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
@@ -151,7 +156,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
 
         xdr->buflen += len;
 }
-
+EXPORT_SYMBOL(xdr_inline_pages);
 
 /*
  * Helper routines for doing 'memmove' like operations on a struct xdr_buf
@@ -418,6 +423,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
 {
         xdr_shrink_bufhead(buf, len);
 }
+EXPORT_SYMBOL(xdr_shift_buf);
 
 /**
  * xdr_init_encode - Initialize a struct xdr_stream for sending data.
@@ -639,6 +645,7 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
         buf->page_len = 0;
         buf->buflen = buf->len = iov->iov_len;
 }
+EXPORT_SYMBOL(xdr_buf_from_iov);
 
 /* Sets subbuf to the portion of buf of length len beginning base bytes
  * from the start of buf. Returns -1 if base of length are out of bounds. */
@@ -687,6 +694,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                 return -1;
         return 0;
 }
+EXPORT_SYMBOL(xdr_buf_subsegment);
 
 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
@@ -717,6 +725,7 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u
         __read_bytes_from_xdr_buf(&subbuf, obj, len);
         return 0;
 }
+EXPORT_SYMBOL(read_bytes_from_xdr_buf);
 
 static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
@@ -760,6 +769,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
         *obj = ntohl(raw);
         return 0;
 }
+EXPORT_SYMBOL(xdr_decode_word);
 
 int
 xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
@@ -768,6 +778,7 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
 
         return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
 }
+EXPORT_SYMBOL(xdr_encode_word);
 
 /* If the netobj starting offset bytes from the start of xdr_buf is contained
  * entirely in the head or the tail, set object to point to it; otherwise
@@ -805,6 +816,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
         __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
         return 0;
 }
+EXPORT_SYMBOL(xdr_buf_read_netobj);
 
 /* Returns 0 on success, or else a negative error code. */
 static int
@@ -1010,6 +1022,7 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
 
         return xdr_xcode_array2(buf, base, desc, 0);
 }
+EXPORT_SYMBOL(xdr_decode_array2);
 
 int
 xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
@@ -1021,6 +1034,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
 
         return xdr_xcode_array2(buf, base, desc, 1);
 }
+EXPORT_SYMBOL(xdr_encode_array2);
 
 int
 xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
@@ -501,9 +501,10 @@ EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
 void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
 {
         int timer = task->tk_msg.rpc_proc->p_timer;
-        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+        struct rpc_clnt *clnt = task->tk_client;
+        struct rpc_rtt *rtt = clnt->cl_rtt;
         struct rpc_rqst *req = task->tk_rqstp;
-        unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
+        unsigned long max_timeout = clnt->cl_timeout->to_maxval;
 
         task->tk_timeout = rpc_calc_rto(rtt, timer);
         task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
@@ -514,7 +515,7 @@ EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
 
 static void xprt_reset_majortimeo(struct rpc_rqst *req)
 {
-        struct rpc_timeout *to = &req->rq_xprt->timeout;
+        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 
         req->rq_majortimeo = req->rq_timeout;
         if (to->to_exponential)
@@ -534,7 +535,7 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
 int xprt_adjust_timeout(struct rpc_rqst *req)
 {
         struct rpc_xprt *xprt = req->rq_xprt;
-        struct rpc_timeout *to = &xprt->timeout;
+        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
         int status = 0;
 
         if (time_before(jiffies, req->rq_majortimeo)) {
|
|||
struct rpc_xprt *xprt =
|
||||
container_of(work, struct rpc_xprt, task_cleanup);
|
||||
|
||||
xprt_disconnect(xprt);
|
||||
xprt->ops->close(xprt);
|
||||
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
|
||||
xprt_release_write(xprt, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_disconnect - mark a transport as disconnected
|
||||
* xprt_disconnect_done - mark a transport as disconnected
|
||||
* @xprt: transport to flag for disconnect
|
||||
*
|
||||
*/
|
||||
void xprt_disconnect(struct rpc_xprt *xprt)
|
||||
void xprt_disconnect_done(struct rpc_xprt *xprt)
|
||||
{
|
||||
dprintk("RPC: disconnected transport %p\n", xprt);
|
||||
spin_lock_bh(&xprt->transport_lock);
|
||||
|
@ -586,7 +587,26 @@ void xprt_disconnect(struct rpc_xprt *xprt)
|
|||
xprt_wake_pending_tasks(xprt, -ENOTCONN);
|
||||
spin_unlock_bh(&xprt->transport_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xprt_disconnect);
|
||||
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
|
||||
|
||||
/**
|
||||
* xprt_force_disconnect - force a transport to disconnect
|
||||
* @xprt: transport to disconnect
|
||||
*
|
||||
*/
|
||||
void xprt_force_disconnect(struct rpc_xprt *xprt)
|
||||
{
|
||||
/* Don't race with the test_bit() in xprt_clear_locked() */
|
||||
spin_lock_bh(&xprt->transport_lock);
|
||||
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
|
||||
/* Try to schedule an autoclose RPC call */
|
||||
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
|
||||
queue_work(rpciod_workqueue, &xprt->task_cleanup);
|
||||
else if (xprt->snd_task != NULL)
|
||||
rpc_wake_up_task(xprt->snd_task);
|
||||
spin_unlock_bh(&xprt->transport_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
|
||||
|
||||
static void
|
||||
xprt_init_autodisconnect(unsigned long data)
|
||||
|
@ -909,7 +929,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
|
|||
{
|
||||
struct rpc_rqst *req = task->tk_rqstp;
|
||||
|
||||
req->rq_timeout = xprt->timeout.to_initval;
|
||||
req->rq_timeout = task->tk_client->cl_timeout->to_initval;
|
||||
req->rq_task = task;
|
||||
req->rq_xprt = xprt;
|
||||
req->rq_buffer = NULL;
|
||||
|
@ -958,22 +978,6 @@ void xprt_release(struct rpc_task *task)
|
|||
spin_unlock(&xprt->reserve_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_set_timeout - set constant RPC timeout
|
||||
* @to: RPC timeout parameters to set up
|
||||
* @retr: number of retries
|
||||
* @incr: amount of increase after each retry
|
||||
*
|
||||
*/
|
||||
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
|
||||
{
|
||||
to->to_initval =
|
||||
to->to_increment = incr;
|
||||
to->to_maxval = to->to_initval + (incr * retr);
|
||||
to->to_retries = retr;
|
||||
to->to_exponential = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_create_transport - create an RPC transport
|
||||
* @args: rpc transport creation arguments
|
||||
|
|
|
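The new xprt_force_disconnect() above hinges on an atomic test-and-set of the XPRT_LOCKED bit: if nobody holds the transport write lock, the autoclose work is queued; otherwise the current lock holder is woken so that it notices XPRT_CLOSE_WAIT. A minimal userspace sketch of that test-and-set dispatch follows; the names are invented and C11 stdatomic stands in for the kernel's bit operations and workqueue, so this is only an illustration of the pattern.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the transport state bits. */
static atomic_bool close_wait;  /* ~ XPRT_CLOSE_WAIT */
static atomic_bool locked;      /* ~ XPRT_LOCKED */

static void queue_autoclose(void)  { puts("autoclose work queued"); }
static void wake_lock_holder(void) { puts("current lock holder woken"); }

/* Mark the transport for closing, then either queue the cleanup work
 * (lock was free) or poke whoever owns the lock so it sees the flag. */
static void force_disconnect(void)
{
        atomic_store(&close_wait, true);
        if (!atomic_exchange(&locked, true))
                queue_autoclose();
        else
                wake_lock_holder();
}

int main(void)
{
        force_disconnect();     /* lock was free -> queue the work */
        force_disconnect();     /* lock now held -> wake the holder */
        return 0;
}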
@@ -83,7 +83,7 @@ static const char transfertypes[][12] = {
  */
 
 static int
-rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, int pos,
+rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
         enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
 {
         int len, n = 0, p;
@@ -169,7 +169,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
         struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
         struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
         int nsegs, nchunks = 0;
-        int pos;
+        unsigned int pos;
         struct rpcrdma_mr_seg *seg = req->rl_segments;
         struct rpcrdma_read_chunk *cur_rchunk = NULL;
         struct rpcrdma_write_array *warray = NULL;
@@ -213,7 +213,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
                                 (__be32 *)&cur_rchunk->rc_target.rs_offset,
                                 seg->mr_base);
                         dprintk("RPC: %s: read chunk "
-                                "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
+                                "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
                                 seg->mr_len, (unsigned long long)seg->mr_base,
                                 seg->mr_rkey, pos, n < nsegs ? "more" : "last");
                         cur_rchunk++;
@@ -552,7 +552,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
  * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
  */
 static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp)
+rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
 {
         unsigned int i, total_len;
         struct rpcrdma_write_chunk *cur_wchunk;
@@ -212,12 +212,16 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
 static void
 xprt_rdma_free_addresses(struct rpc_xprt *xprt)
 {
-        kfree(xprt->address_strings[RPC_DISPLAY_ADDR]);
-        kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
-        kfree(xprt->address_strings[RPC_DISPLAY_ALL]);
-        kfree(xprt->address_strings[RPC_DISPLAY_HEX_ADDR]);
-        kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
-        kfree(xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR]);
+        unsigned int i;
+
+        for (i = 0; i < RPC_DISPLAY_MAX; i++)
+                switch (i) {
+                case RPC_DISPLAY_PROTO:
+                case RPC_DISPLAY_NETID:
+                        continue;
+                default:
+                        kfree(xprt->address_strings[i]);
+                }
 }
 
 static void
@@ -289,6 +293,11 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
         module_put(THIS_MODULE);
 }
 
+static const struct rpc_timeout xprt_rdma_default_timeout = {
+        .to_initval = 60 * HZ,
+        .to_maxval = 60 * HZ,
+};
+
 /**
  * xprt_setup_rdma - Set up transport to use RDMA
  *
@@ -327,7 +336,7 @@ xprt_setup_rdma(struct xprt_create *args)
         }
 
         /* 60 second timeout, no retries */
-        xprt_set_timeout(&xprt->timeout, 0, 60UL * HZ);
+        xprt->timeout = &xprt_rdma_default_timeout;
         xprt->bind_timeout = (60U * HZ);
         xprt->connect_timeout = (60U * HZ);
         xprt->reestablish_timeout = (5U * HZ);
@@ -449,7 +458,7 @@ xprt_rdma_close(struct rpc_xprt *xprt)
         struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
         dprintk("RPC: %s: closing\n", __func__);
-        xprt_disconnect(xprt);
+        xprt_disconnect_done(xprt);
         (void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
 }
@@ -682,7 +691,7 @@ xprt_rdma_send_request(struct rpc_task *task)
         }
 
         if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) {
-                xprt_disconnect(xprt);
+                xprt_disconnect_done(xprt);
                 return -ENOTCONN;       /* implies disconnect */
         }
 
@@ -522,7 +522,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                 struct rpcrdma_create_data_internal *cdata)
 {
         struct ib_device_attr devattr;
-        int rc;
+        int rc, err;
 
         rc = ib_query_device(ia->ri_id->device, &devattr);
         if (rc) {
@@ -648,8 +648,10 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
         return 0;
 
 out2:
-        if (ib_destroy_cq(ep->rep_cq))
-                ;
+        err = ib_destroy_cq(ep->rep_cq);
+        if (err)
+                dprintk("RPC: %s: ib_destroy_cq returned %i\n",
+                        __func__, err);
 out1:
         return rc;
 }
@@ -280,7 +280,9 @@ static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
         return (struct sockaddr_in6 *) &xprt->addr;
 }
 
-static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt)
+static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt,
+                                          const char *protocol,
+                                          const char *netid)
 {
         struct sockaddr_in *addr = xs_addr_in(xprt);
         char *buf;
@@ -299,21 +301,14 @@ static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt)
         }
         xprt->address_strings[RPC_DISPLAY_PORT] = buf;
 
-        buf = kzalloc(8, GFP_KERNEL);
-        if (buf) {
-                if (xprt->prot == IPPROTO_UDP)
-                        snprintf(buf, 8, "udp");
-                else
-                        snprintf(buf, 8, "tcp");
-        }
-        xprt->address_strings[RPC_DISPLAY_PROTO] = buf;
+        xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 
         buf = kzalloc(48, GFP_KERNEL);
         if (buf) {
                 snprintf(buf, 48, "addr="NIPQUAD_FMT" port=%u proto=%s",
                         NIPQUAD(addr->sin_addr.s_addr),
                         ntohs(addr->sin_port),
-                        xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
+                        protocol);
         }
         xprt->address_strings[RPC_DISPLAY_ALL] = buf;
 
@@ -340,12 +335,12 @@ static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt)
         }
         xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
 
-        xprt->address_strings[RPC_DISPLAY_NETID] =
-                kstrdup(xprt->prot == IPPROTO_UDP ?
-                        RPCBIND_NETID_UDP : RPCBIND_NETID_TCP, GFP_KERNEL);
+        xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 }
 
-static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt)
+static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt,
+                                          const char *protocol,
+                                          const char *netid)
 {
         struct sockaddr_in6 *addr = xs_addr_in6(xprt);
         char *buf;
@@ -364,21 +359,14 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt)
         }
         xprt->address_strings[RPC_DISPLAY_PORT] = buf;
 
-        buf = kzalloc(8, GFP_KERNEL);
-        if (buf) {
-                if (xprt->prot == IPPROTO_UDP)
-                        snprintf(buf, 8, "udp");
-                else
-                        snprintf(buf, 8, "tcp");
-        }
-        xprt->address_strings[RPC_DISPLAY_PROTO] = buf;
+        xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 
         buf = kzalloc(64, GFP_KERNEL);
         if (buf) {
                 snprintf(buf, 64, "addr="NIP6_FMT" port=%u proto=%s",
                         NIP6(addr->sin6_addr),
                         ntohs(addr->sin6_port),
-                        xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
+                        protocol);
         }
         xprt->address_strings[RPC_DISPLAY_ALL] = buf;
 
@@ -405,17 +393,21 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt)
         }
         xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
 
-        xprt->address_strings[RPC_DISPLAY_NETID] =
-                kstrdup(xprt->prot == IPPROTO_UDP ?
-                        RPCBIND_NETID_UDP6 : RPCBIND_NETID_TCP6, GFP_KERNEL);
+        xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 }
 
 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 {
-        int i;
+        unsigned int i;
 
         for (i = 0; i < RPC_DISPLAY_MAX; i++)
-                kfree(xprt->address_strings[i]);
+                switch (i) {
+                case RPC_DISPLAY_PROTO:
+                case RPC_DISPLAY_NETID:
+                        continue;
+                default:
+                        kfree(xprt->address_strings[i]);
+                }
 }
 
 #define XS_SENDMSG_FLAGS        (MSG_DONTWAIT | MSG_NOSIGNAL)
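With the protocol and netid display strings now passed in as constants, only the dynamically allocated entries of address_strings[] may be freed; both xprt_rdma_free_addresses() above and xs_free_peer_addresses() skip the two static slots. Below is a small userspace sketch of that "free everything except the static slots" loop; the enum values, array and main() are invented for the example and merely mirror the shape of the kernel loop.

#include <stdlib.h>
#include <string.h>

/* Invented mirror of the RPC_DISPLAY_* index space. */
enum { DISPLAY_ADDR, DISPLAY_PORT, DISPLAY_PROTO, DISPLAY_NETID, DISPLAY_MAX };

static void free_peer_strings(char *strings[DISPLAY_MAX])
{
        unsigned int i;

        for (i = 0; i < DISPLAY_MAX; i++) {
                switch (i) {
                case DISPLAY_PROTO:     /* points at a string literal */
                case DISPLAY_NETID:     /* likewise: must never be freed */
                        continue;
                default:
                        free(strings[i]);
                }
        }
}

int main(void)
{
        char *strings[DISPLAY_MAX] = {
                strdup("192.0.2.1"),    /* heap-allocated, must be freed */
                strdup("2049"),
                "tcp",                  /* static, skipped by the loop */
                "tcp",
        };

        free_peer_strings(strings);
        return 0;
}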
@@ -614,6 +606,22 @@ static int xs_udp_send_request(struct rpc_task *task)
         return status;
 }
 
+/**
+ * xs_tcp_shutdown - gracefully shut down a TCP socket
+ * @xprt: transport
+ *
+ * Initiates a graceful shutdown of the TCP socket by calling the
+ * equivalent of shutdown(SHUT_WR);
+ */
+static void xs_tcp_shutdown(struct rpc_xprt *xprt)
+{
+        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+        struct socket *sock = transport->sock;
+
+        if (sock != NULL)
+                kernel_sock_shutdown(sock, SHUT_WR);
+}
+
 static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
 {
         u32 reclen = buf->len - sizeof(rpc_fraghdr);
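xs_tcp_shutdown() above performs the in-kernel equivalent of shutdown(fd, SHUT_WR): the client sends a FIN and stops writing, but keeps the receive side open until the server closes its end. The userspace sketch below shows the same half-close on an ordinary socket; the socket setup is boilerplate invented for the example and is not taken from the commit.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Half-close an established TCP connection: send a FIN but keep the
 * receive side open so any in-flight replies can still be read. */
static int graceful_shutdown(int sockfd)
{
        if (shutdown(sockfd, SHUT_WR) < 0) {
                perror("shutdown(SHUT_WR)");
                return -1;
        }
        return 0;
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* On an unconnected socket shutdown() fails with ENOTCONN;
         * on a connected socket it would send the FIN. */
        graceful_shutdown(fd);
        close(fd);
        return 0;
}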
@@ -691,7 +699,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
         default:
                 dprintk("RPC: sendmsg returned unrecognized error %d\n",
                         -status);
-                xprt_disconnect(xprt);
+                xs_tcp_shutdown(xprt);
                 break;
         }
 
@@ -759,7 +767,9 @@ static void xs_close(struct rpc_xprt *xprt)
 clear_close_wait:
         smp_mb__before_clear_bit();
         clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+        clear_bit(XPRT_CLOSING, &xprt->state);
         smp_mb__after_clear_bit();
+        xprt_disconnect_done(xprt);
 }
 
 /**
@@ -775,7 +785,6 @@ static void xs_destroy(struct rpc_xprt *xprt)
 
         cancel_rearming_delayed_work(&transport->connect_worker);
 
-        xprt_disconnect(xprt);
         xs_close(xprt);
         xs_free_peer_addresses(xprt);
         kfree(xprt->slot);
@@ -886,7 +895,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
         /* Sanity check of the record length */
         if (unlikely(transport->tcp_reclen < 4)) {
                 dprintk("RPC: invalid TCP record fragment length\n");
-                xprt_disconnect(xprt);
+                xprt_force_disconnect(xprt);
                 return;
         }
         dprintk("RPC: reading TCP record fragment of length %d\n",
@@ -1113,21 +1122,44 @@ static void xs_tcp_state_change(struct sock *sk)
                         transport->tcp_flags =
                                 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
 
                         xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
                         xprt_wake_pending_tasks(xprt, 0);
                 }
                 spin_unlock_bh(&xprt->transport_lock);
                 break;
-        case TCP_SYN_SENT:
-        case TCP_SYN_RECV:
+        case TCP_FIN_WAIT1:
+                /* The client initiated a shutdown of the socket */
+                xprt->reestablish_timeout = 0;
+                set_bit(XPRT_CLOSING, &xprt->state);
+                smp_mb__before_clear_bit();
+                clear_bit(XPRT_CONNECTED, &xprt->state);
+                clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+                smp_mb__after_clear_bit();
                 break;
         case TCP_CLOSE_WAIT:
-                /* Try to schedule an autoclose RPC calls */
-                set_bit(XPRT_CLOSE_WAIT, &xprt->state);
-                if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
-                        queue_work(rpciod_workqueue, &xprt->task_cleanup);
-        default:
-                xprt_disconnect(xprt);
+                /* The server initiated a shutdown of the socket */
+                set_bit(XPRT_CLOSING, &xprt->state);
+                xprt_force_disconnect(xprt);
+        case TCP_SYN_SENT:
+        case TCP_CLOSING:
+                /*
+                 * If the server closed down the connection, make sure that
+                 * we back off before reconnecting
+                 */
+                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+                break;
+        case TCP_LAST_ACK:
+                smp_mb__before_clear_bit();
+                clear_bit(XPRT_CONNECTED, &xprt->state);
+                smp_mb__after_clear_bit();
+                break;
+        case TCP_CLOSE:
+                smp_mb__before_clear_bit();
+                clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+                clear_bit(XPRT_CLOSING, &xprt->state);
+                smp_mb__after_clear_bit();
+                /* Mark transport as closed and wake up all pending tasks */
+                xprt_disconnect_done(xprt);
         }
 out:
         read_unlock(&sk->sk_callback_lock);
@@ -1279,34 +1311,53 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
         }
 }
 
+static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
+{
+        unsigned short port = transport->port;
+
+        if (port == 0 && transport->xprt.resvport)
+                port = xs_get_random_port();
+        return port;
+}
+
+static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
+{
+        if (transport->port != 0)
+                transport->port = 0;
+        if (!transport->xprt.resvport)
+                return 0;
+        if (port <= xprt_min_resvport || port > xprt_max_resvport)
+                return xprt_max_resvport;
+        return --port;
+}
+
 static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
 {
         struct sockaddr_in myaddr = {
                 .sin_family = AF_INET,
         };
         struct sockaddr_in *sa;
-        int err;
-        unsigned short port = transport->port;
+        int err, nloop = 0;
+        unsigned short port = xs_get_srcport(transport, sock);
+        unsigned short last;
 
-        if (!transport->xprt.resvport)
-                port = 0;
         sa = (struct sockaddr_in *)&transport->addr;
         myaddr.sin_addr = sa->sin_addr;
         do {
                 myaddr.sin_port = htons(port);
                 err = kernel_bind(sock, (struct sockaddr *) &myaddr,
                                                 sizeof(myaddr));
-                if (!transport->xprt.resvport)
+                if (port == 0)
                         break;
                 if (err == 0) {
                         transport->port = port;
                         break;
                 }
-                if (port <= xprt_min_resvport)
-                        port = xprt_max_resvport;
-                else
-                        port--;
-        } while (err == -EADDRINUSE && port != transport->port);
+                last = port;
+                port = xs_next_srcport(transport, sock, port);
+                if (port > last)
+                        nloop++;
+        } while (err == -EADDRINUSE && nloop != 2);
         dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n",
                         __FUNCTION__, NIPQUAD(myaddr.sin_addr),
                         port, err ? "failed" : "ok", err);
@@ -1319,28 +1370,27 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
                 .sin6_family = AF_INET6,
         };
         struct sockaddr_in6 *sa;
-        int err;
-        unsigned short port = transport->port;
+        int err, nloop = 0;
+        unsigned short port = xs_get_srcport(transport, sock);
+        unsigned short last;
 
-        if (!transport->xprt.resvport)
-                port = 0;
         sa = (struct sockaddr_in6 *)&transport->addr;
         myaddr.sin6_addr = sa->sin6_addr;
         do {
                 myaddr.sin6_port = htons(port);
                 err = kernel_bind(sock, (struct sockaddr *) &myaddr,
                                                 sizeof(myaddr));
-                if (!transport->xprt.resvport)
+                if (port == 0)
                         break;
                 if (err == 0) {
                         transport->port = port;
                         break;
                 }
-                if (port <= xprt_min_resvport)
-                        port = xprt_max_resvport;
-                else
-                        port--;
-        } while (err == -EADDRINUSE && port != transport->port);
+                last = port;
+                port = xs_next_srcport(transport, sock, port);
+                if (port > last)
+                        nloop++;
+        } while (err == -EADDRINUSE && nloop != 2);
         dprintk("RPC: xs_bind6 "NIP6_FMT":%u: %s (%d)\n",
                 NIP6(myaddr.sin6_addr), port, err ? "failed" : "ok", err);
         return err;
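The new xs_get_srcport()/xs_next_srcport() helpers used by xs_bind4() and xs_bind6() above walk downward through the reserved port range, wrap from the minimum back to the maximum, and give up once the range has been traversed twice (nloop reaching 2). The compact userspace sketch below shows only that stepping policy; the port limits, try_bind() and the starting port are illustrative stand-ins (the kernel takes the limits from the xprt_min_resvport/xprt_max_resvport sysctls and binds a real socket).

#include <stdio.h>

#define MIN_RESVPORT 665        /* illustrative; a sysctl in the kernel */
#define MAX_RESVPORT 1023

/* Step to the next candidate source port, wrapping to the top of the
 * range once we fall off the bottom (mirrors xs_next_srcport()). */
static unsigned short next_srcport(unsigned short port)
{
        if (port <= MIN_RESVPORT || port > MAX_RESVPORT)
                return MAX_RESVPORT;
        return --port;
}

/* Pretend bind() that only succeeds on one particular port. */
static int try_bind(unsigned short port, unsigned short free_port)
{
        return port == free_port ? 0 : -1;      /* -1 ~ EADDRINUSE */
}

int main(void)
{
        unsigned short port = 700, last;
        unsigned int nloop = 0;
        int err;

        do {
                err = try_bind(port, 668);
                if (err == 0) {
                        printf("bound to port %u\n", port);
                        break;
                }
                last = port;
                port = next_srcport(port);
                if (port > last)        /* wrapped around: one full pass done */
                        nloop++;
        } while (err != 0 && nloop != 2);

        if (err != 0)
                printf("no free port in range\n");
        return 0;
}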
@@ -1602,8 +1652,7 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
                         break;
                 default:
                         /* get rid of existing socket, and retry */
-                        xs_close(xprt);
-                        break;
+                        xs_tcp_shutdown(xprt);
                 }
         }
 out:
@@ -1662,8 +1711,7 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
                         break;
                 default:
                         /* get rid of existing socket, and retry */
-                        xs_close(xprt);
-                        break;
+                        xs_tcp_shutdown(xprt);
                 }
         }
 out:
@@ -1710,6 +1758,19 @@ static void xs_connect(struct rpc_task *task)
         }
 }
 
+static void xs_tcp_connect(struct rpc_task *task)
+{
+        struct rpc_xprt *xprt = task->tk_xprt;
+
+        /* Initiate graceful shutdown of the socket if not already done */
+        if (test_bit(XPRT_CONNECTED, &xprt->state))
+                xs_tcp_shutdown(xprt);
+        /* Exit if we need to wait for socket shutdown to complete */
+        if (test_bit(XPRT_CLOSING, &xprt->state))
+                return;
+        xs_connect(task);
+}
+
 /**
  * xs_udp_print_stats - display UDP socket-specifc stats
  * @xprt: rpc_xprt struct containing statistics
@@ -1780,12 +1841,12 @@ static struct rpc_xprt_ops xs_tcp_ops = {
         .release_xprt           = xs_tcp_release_xprt,
         .rpcbind                = rpcb_getport_async,
         .set_port               = xs_set_port,
-        .connect                = xs_connect,
+        .connect                = xs_tcp_connect,
         .buf_alloc              = rpc_malloc,
         .buf_free               = rpc_free,
         .send_request           = xs_tcp_send_request,
         .set_retrans_timeout    = xprt_set_retrans_timeout_def,
-        .close                  = xs_close,
+        .close                  = xs_tcp_shutdown,
         .destroy                = xs_destroy,
         .print_stats            = xs_tcp_print_stats,
 };
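The xs_tcp_ops change above only repoints two entries of the transport's ops table: .connect now goes through xs_tcp_connect(), which may first wait for a clean shutdown, and .close goes through xs_tcp_shutdown(). As a tiny illustration of this function-pointer dispatch pattern (the types, names and main() below are invented for the example and are not the kernel's API):

#include <stdio.h>

/* Invented miniature of a transport ops table. */
struct demo_xprt;
struct demo_xprt_ops {
        void (*connect)(struct demo_xprt *xprt);
        void (*close)(struct demo_xprt *xprt);
};

struct demo_xprt {
        const struct demo_xprt_ops *ops;
        const char *name;
};

static void tcp_connect(struct demo_xprt *xprt)  { printf("%s: connect\n", xprt->name); }
static void tcp_shutdown(struct demo_xprt *xprt) { printf("%s: graceful shutdown\n", xprt->name); }

static const struct demo_xprt_ops tcp_ops = {
        .connect = tcp_connect,
        .close   = tcp_shutdown,        /* swapped in, as the diff does for xs_tcp_ops */
};

int main(void)
{
        struct demo_xprt xprt = { .ops = &tcp_ops, .name = "tcp" };

        /* Generic code never calls the helpers directly, only via the table. */
        xprt.ops->connect(&xprt);
        xprt.ops->close(&xprt);
        return 0;
}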
@@ -1822,11 +1883,17 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
         xprt->addrlen = args->addrlen;
         if (args->srcaddr)
                 memcpy(&new->addr, args->srcaddr, args->addrlen);
-        new->port = xs_get_random_port();
 
         return xprt;
 }
 
+static const struct rpc_timeout xs_udp_default_timeout = {
+        .to_initval = 5 * HZ,
+        .to_maxval = 30 * HZ,
+        .to_increment = 5 * HZ,
+        .to_retries = 5,
+};
+
 /**
  * xs_setup_udp - Set up transport to use a UDP socket
  * @args: rpc transport creation arguments
@@ -1855,10 +1922,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
         xprt->ops = &xs_udp_ops;
 
-        if (args->timeout)
-                xprt->timeout = *args->timeout;
-        else
-                xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
+        xprt->timeout = &xs_udp_default_timeout;
 
         switch (addr->sa_family) {
         case AF_INET:
@@ -1867,7 +1931,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
                 INIT_DELAYED_WORK(&transport->connect_worker,
                                         xs_udp_connect_worker4);
-                xs_format_ipv4_peer_addresses(xprt);
+                xs_format_ipv4_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
                 break;
         case AF_INET6:
                 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
@@ -1875,7 +1939,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
                 INIT_DELAYED_WORK(&transport->connect_worker,
                                         xs_udp_connect_worker6);
-                xs_format_ipv6_peer_addresses(xprt);
+                xs_format_ipv6_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
                 break;
         default:
                 kfree(xprt);
@@ -1893,6 +1957,12 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
         return ERR_PTR(-EINVAL);
 }
 
+static const struct rpc_timeout xs_tcp_default_timeout = {
+        .to_initval = 60 * HZ,
+        .to_maxval = 60 * HZ,
+        .to_retries = 2,
+};
+
 /**
  * xs_setup_tcp - Set up transport to use a TCP socket
  * @args: rpc transport creation arguments
@@ -1919,11 +1989,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
         xprt->idle_timeout = XS_IDLE_DISC_TO;
 
         xprt->ops = &xs_tcp_ops;
-
-        if (args->timeout)
-                xprt->timeout = *args->timeout;
-        else
-                xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
+        xprt->timeout = &xs_tcp_default_timeout;
 
         switch (addr->sa_family) {
         case AF_INET:
@@ -1931,14 +1997,14 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
                 xprt_set_bound(xprt);
 
                 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4);
-                xs_format_ipv4_peer_addresses(xprt);
+                xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
                 break;
         case AF_INET6:
                 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
                         xprt_set_bound(xprt);
 
                 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6);
-                xs_format_ipv6_peer_addresses(xprt);
+                xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
                 break;
         default:
                 kfree(xprt);