svc: Move sk_reserved to svc_xprt
Author:     Tom Tucker <tom@opengridcomputing.com>
AuthorDate: Mon, 31 Dec 2007 03:07:55 +0000 (21:07 -0600)
Commit:     J. Bruce Fields <bfields@citi.umich.edu>
CommitDate: Fri, 1 Feb 2008 21:42:11 +0000 (16:42 -0500)
This functionally trivial patch moves the sk_reserved field from
svc_sock to the transport-independent svc_xprt structure, renaming
it xpt_reserved.
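
For context, the moved counter tracks reply space reserved against the
transport's output queue: it is bumped by a worst-case reply size
(serv->sv_max_mesg) when a request is handed to a server thread, trimmed
back by svc_reserve() once the actual reply size is known, and consulted
by the has_wspace callbacks before more work is accepted. Below is a
minimal userspace sketch of that accounting pattern, not kernel code;
struct xprt, MAX_MESG, dispatch(), reserve(), and has_wspace() are
illustrative stand-ins for the structures and functions touched by the
diff.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative stand-in for svc_xprt: only the counter. */
	struct xprt {
		atomic_int xpt_reserved; /* space on outq that is rsvd */
	};

	enum { MAX_MESG = 4096 };        /* stand-in for sv_max_mesg */

	/* Reserve worst-case reply space when a request is dispatched
	 * (cf. svc_sock_enqueue() and svc_recv() in the diff). */
	static void dispatch(struct xprt *xp, int *rq_reserved)
	{
		*rq_reserved = MAX_MESG;
		atomic_fetch_add(&xp->xpt_reserved, *rq_reserved);
	}

	/* Shrink the reservation once the actual reply size is known
	 * (cf. svc_reserve()). */
	static void reserve(struct xprt *xp, int *rq_reserved, int space)
	{
		if (space < *rq_reserved) {
			atomic_fetch_sub(&xp->xpt_reserved,
					 *rq_reserved - space);
			*rq_reserved = space;
		}
	}

	/* Accept more work only if the write space covers double the
	 * sum of outstanding reservations and one more worst-case
	 * reply (cf. svc_udp_has_wspace()/svc_tcp_has_wspace()). */
	static int has_wspace(struct xprt *xp, int wspace)
	{
		int required = atomic_load(&xp->xpt_reserved) + MAX_MESG;

		return required * 2 <= wspace;
	}

	int main(void)
	{
		struct xprt xp = { .xpt_reserved = 0 };
		int rq_reserved = 0;

		dispatch(&xp, &rq_reserved);     /* xpt_reserved: 4096 */
		reserve(&xp, &rq_reserved, 512); /* reply was small: 512 */
		printf("reserved=%d has_wspace=%d\n",
		       atomic_load(&xp.xpt_reserved),
		       has_wspace(&xp, 16384));
		return 0;
	}

Keeping the counter in svc_xprt rather than svc_sock means this
accounting no longer assumes a socket, so transport-independent code and
non-socket transports can share it.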

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/svcsock.h
net/sunrpc/svcsock.c

diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0a3e09b42a8340f1a618534088cb9e42492309a3..0b8ee06f99c0ac7ba3143ed33faae4f46d13c17c 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -53,6 +53,7 @@ struct svc_xprt {
 
        struct svc_pool         *xpt_pool;      /* current pool iff queued */
        struct svc_serv         *xpt_server;    /* service for transport */
+       atomic_t                xpt_reserved;   /* space on outq that is rsvd */
 };
 
 int    svc_reg_xprt_class(struct svc_xprt_class *);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 060508ba358bc3347e0f71c79a1a8e6f644e8bc7..ba41f11788f25d02db4ed387bf957cdf6ede2b48 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -20,8 +20,6 @@ struct svc_sock {
        struct socket *         sk_sock;        /* berkeley socket layer */
        struct sock *           sk_sk;          /* INET layer */
 
-       atomic_t                sk_reserved;    /* space on outq that is reserved */
-
        spinlock_t              sk_lock;        /* protects sk_deferred and
                                                 * sk_info_authunix */
        struct list_head        sk_deferred;    /* deferred requests that need to
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6f63a5ca6a91149b1a67881ee0c103e086549e01..c47bede754ea759a254a7f97b1261bb72625b66e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -288,7 +288,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
                rqstp->rq_sock = svsk;
                svc_xprt_get(&svsk->sk_xprt);
                rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+               atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
                BUG_ON(svsk->sk_xprt.xpt_pool != pool);
                wake_up(&rqstp->rq_wait);
        } else {
@@ -353,7 +353,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 
        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
-               atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
+               atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
                rqstp->rq_reserved = space;
 
                svc_sock_enqueue(svsk);
@@ -881,7 +881,7 @@ static int svc_udp_has_wspace(struct svc_xprt *xprt)
         * sock space.
         */
        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-       required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+       required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
        if (required*2 > sock_wspace(svsk->sk_sk))
                return 0;
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
@@ -1327,7 +1327,7 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
         * sock space.
         */
        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-       required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+       required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
        wspace = sk_stream_wspace(svsk->sk_sk);
 
        if (wspace < sk_stream_min_wspace(svsk->sk_sk))
@@ -1544,7 +1544,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
                rqstp->rq_sock = svsk;
                svc_xprt_get(&svsk->sk_xprt);
                rqstp->rq_reserved = serv->sv_max_mesg;
-               atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+               atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
        } else {
                /* No data pending. Go to sleep */
                svc_thread_enqueue(pool, rqstp);