path: root/net/sunrpc/xprt.c
author		Chuck Lever <cel@netapp.com>	2005-08-25 16:25:54 -0700
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2005-09-23 12:38:48 -0400
commit		555ee3af161b037865793bd4bebc06b58daafde6 (patch)
tree		4c72474dabffab62234db158e5b6c86ace4f9d09 /net/sunrpc/xprt.c
parent		ed63c003701a314c4893c11eceb9d68f8f46c662 (diff)
[PATCH] RPC: clean up after nocong was removed
Clean-up: Move some macros that are specific to the Van Jacobson
implementation into xprt.c.  Get rid of the cong_wait field in
rpc_xprt, which is no longer used.  Get rid of xprt_clear_backlog.

Test-plan:
Compile with CONFIG_NFS enabled.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r--	net/sunrpc/xprt.c	29
1 file changed, 19 insertions, 10 deletions
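
The comment this patch adds describes classic Van Jacobson behaviour: the congestion window is halved after a retransmit, and otherwise grows by roughly 1/cwnd per reply while a full window of requests is outstanding. As a rough illustration only, the userspace sketch below models that fixed-point arithmetic using the patch's RPC_CWNDSHIFT/RPC_CWNDSCALE macros; the toy_* names, the growth formula, and the demo loop are assumptions for illustration, not code taken from this commit.

	#include <stdio.h>

	#define RPC_CWNDSHIFT		(8U)
	#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)	/* one request, in fixed point */
	#define RPC_INITCWND		RPC_CWNDSCALE

	/* Toy stand-in for the cwnd/cong/max_reqs fields of struct rpc_xprt. */
	struct toy_xprt {
		unsigned long	cwnd;		/* congestion window, scaled by RPC_CWNDSCALE */
		unsigned long	cong;		/* slots currently in use, same scale */
		unsigned int	max_reqs;	/* hard cap, cf. RPC_MAXCWND(xprt) in the patch */
	};

	/*
	 * Halve the window on a retransmit timeout; otherwise add about
	 * 1/cwnd per reply while a full window of requests is outstanding.
	 */
	static void toy_adjust_cwnd(struct toy_xprt *xprt, int timed_out)
	{
		unsigned long cwnd = xprt->cwnd;

		if (timed_out) {
			cwnd >>= 1;
			if (cwnd < RPC_CWNDSCALE)
				cwnd = RPC_CWNDSCALE;
		} else if (cwnd <= xprt->cong) {
			/* (cwnd >> 1) rounds the fixed-point division */
			cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
			if (cwnd > ((unsigned long)xprt->max_reqs << RPC_CWNDSHIFT))
				cwnd = (unsigned long)xprt->max_reqs << RPC_CWNDSHIFT;
		}
		xprt->cwnd = cwnd;
	}

	int main(void)
	{
		struct toy_xprt xprt = {
			.cwnd = RPC_INITCWND, .cong = RPC_INITCWND, .max_reqs = 16,
		};
		int i;

		for (i = 0; i < 4; i++) {
			xprt.cong = xprt.cwnd;		/* pretend the window stays full */
			toy_adjust_cwnd(&xprt, 0);	/* a reply arrives */
			printf("reply %d: cwnd = %lu/%u\n", i + 1, xprt.cwnd, RPC_CWNDSCALE);
		}
		toy_adjust_cwnd(&xprt, 1);		/* a retransmit timeout */
		printf("timeout: cwnd = %lu/%u\n", xprt.cwnd, RPC_CWNDSCALE);
		return 0;
	}

Because cwnd is kept scaled by RPC_CWNDSCALE, "one request" equals 256 window units; the additive term RPC_CWNDSCALE*RPC_CWNDSCALE / cwnd is the fixed-point form of the 1/cwnd increment the comment in the diff refers to.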
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e8d11bd6158e..0458319a1bdd 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,23 @@ static inline void do_xprt_reserve(struct rpc_task *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
-static int xprt_clear_backlog(struct rpc_xprt *xprt);
+/*
+ * The transport code maintains an estimate on the maximum number of out-
+ * standing RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ * - a reply is received and
+ * - a full number of requests are outstanding and
+ * - the congestion window hasn't been updated recently.
+ */
+#define RPC_CWNDSHIFT (8U)
+#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
+
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
/**
* xprt_reserve_xprt - serialize write access to transports
@@ -850,7 +866,7 @@ void xprt_release(struct rpc_task *task)
spin_lock(&xprt->reserve_lock);
list_add(&req->rq_list, &xprt->free);
- xprt_clear_backlog(xprt);
+ rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
@@ -902,7 +918,6 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->reserve_lock);
- init_waitqueue_head(&xprt->cong_wait);
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
@@ -911,6 +926,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
xprt->timer.function = xprt_init_autodisconnect;
xprt->timer.data = (unsigned long) xprt;
xprt->last_used = jiffies;
+ xprt->cwnd = RPC_INITCWND;
rpc_init_wait_queue(&xprt->pending, "xprt_pending");
rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -955,16 +971,9 @@ static void xprt_shutdown(struct rpc_xprt *xprt)
rpc_wake_up(&xprt->resend);
xprt_wake_pending_tasks(xprt, -EIO);
rpc_wake_up(&xprt->backlog);
- wake_up(&xprt->cong_wait);
del_timer_sync(&xprt->timer);
}
-static int xprt_clear_backlog(struct rpc_xprt *xprt) {
- rpc_wake_up_next(&xprt->backlog);
- wake_up(&xprt->cong_wait);
- return 1;
-}
-
/**
* xprt_destroy - destroy an RPC transport, killing off all requests.
* @xprt: transport to destroy
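
For completeness, RPCXPRT_CONGESTED() is the test that decides whether a task may take a congestion slot at all; __xprt_get_cong(), whose prototype appears in the first hunk, performs that check. The sketch below (reusing struct toy_xprt from the sketch above) only illustrates how such a helper can be built on the patch's macros; it is not the kernel's exact code, and toy_rqst/toy_get_cong/toy_put_cong are invented names.

	/* Minimal request stand-in: does this request already hold a slot? */
	struct toy_rqst {
		int	rq_cong;
	};

	/*
	 * Grant @req a congestion slot unless the transport is already at its
	 * window, i.e. RPCXPRT_CONGESTED(xprt) in the patch's terms.  A caller
	 * that gets 0 back is expected to wait on the transport's backlog queue;
	 * with xprt_clear_backlog() gone, xprt_release() now wakes the next
	 * waiter directly via rpc_wake_up_next(&xprt->backlog), as the second
	 * hunk shows.
	 */
	static int toy_get_cong(struct toy_xprt *xprt, struct toy_rqst *req)
	{
		if (req->rq_cong)
			return 1;
		if (xprt->cong >= xprt->cwnd)		/* RPCXPRT_CONGESTED() */
			return 0;
		req->rq_cong = 1;
		xprt->cong += RPC_CWNDSCALE;		/* one more request in flight */
		return 1;
	}

	static void toy_put_cong(struct toy_xprt *xprt, struct toy_rqst *req)
	{
		if (!req->rq_cong)
			return;
		req->rq_cong = 0;
		xprt->cong -= RPC_CWNDSCALE;
	}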