author    David S. Miller <davem@davemloft.net>  2015-03-17 22:02:53 -0400
committer David S. Miller <davem@davemloft.net>  2015-03-17 22:02:53 -0400
commit    5284143057708af297eea10812a67d18e42e9abe (patch)
tree      798f475f48fbdff0eae359c3f345a1a0eaf83ca2 /include
parent    9f2dbdd9b11d40f5fe0749eb91cd1cfc86fde575 (diff)
parent    0470c8ca1d57927f2cc3e1d5add1fb2834609447 (diff)
Merge branch 'listener_refactor_part_12'
Eric Dumazet says:

====================
inet: tcp listener refactoring, part 12

By adding a pointer back to the listener, we are preparing synack rtx
handling to no longer be governed by the listener keepalive timer, as
that timer is the most problematic source of contention on the
listener spinlock.

Note that TCP FastOpen had such a pointer anyway, so we make it
generic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
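[Editor's note] A minimal userspace C sketch of the reference pattern this
merge introduces: each request object takes a counted reference on its
listener when it is created and drops it when it is freed. All names below
are hypothetical, and the C11 atomics stand in for the kernel's
sock_hold()/sock_put().

#include <stdatomic.h>
#include <stdlib.h>

struct listener {
	atomic_int refcnt;		/* stands in for sk->sk_refcnt */
	/* ... listener state ... */
};

struct request {
	struct listener *rsk_listener;	/* back-pointer; holds a reference */
	/* ... request state ... */
};

/* Taking the back-pointer bumps the listener refcount, so the listener
 * cannot go away while any request still points at it.
 */
static struct request *request_alloc(struct listener *l)
{
	struct request *req = malloc(sizeof(*req));

	if (req) {
		atomic_fetch_add(&l->refcnt, 1);	/* like sock_hold() */
		req->rsk_listener = l;
	}
	return req;
}

/* Freeing the request drops its listener reference, mirroring the
 * sock_put() added to reqsk_free() in this merge.
 */
static void request_free(struct request *req)
{
	if (atomic_fetch_sub(&req->rsk_listener->refcnt, 1) == 1)
		free(req->rsk_listener);		/* last reference */
	free(req);
}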
Diffstat (limited to 'include')
-rw-r--r--  include/linux/tcp.h                  2
-rw-r--r--  include/net/inet_connection_sock.h   5
-rw-r--r--  include/net/inet_sock.h             22
-rw-r--r--  include/net/request_sock.h          23
4 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 97dbf16f7d9d..f869ae8afbaf 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -111,7 +111,7 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct sock *listener; /* needed for TFO */
+ bool tfo_listener;
u32 rcv_isn;
u32 snt_isn;
u32 snt_synack; /* synack sent time */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 191feec60205..b9a6b0a94cc6 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -275,11 +275,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
struct sock *child)
{
reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
- /* before letting lookups find us, make sure all req fields
- * are committed to memory.
- */
- smp_wmb();
- atomic_set(&req->rsk_refcnt, 1);
}
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index c9ed91891887..b6c3737da4e9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -81,7 +81,6 @@ struct inet_request_sock {
#define ir_cookie req.__req_common.skc_cookie
#define ireq_net req.__req_common.skc_net
#define ireq_state req.__req_common.skc_state
-#define ireq_refcnt req.__req_common.skc_refcnt
#define ireq_family req.__req_common.skc_family
kmemcheck_bitfield_begin(flags);
@@ -244,25 +243,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
initval);
}
-static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
-{
- struct request_sock *req = reqsk_alloc(ops);
- struct inet_request_sock *ireq = inet_rsk(req);
-
- if (req != NULL) {
- kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
- atomic64_set(&ireq->ir_cookie, 0);
- ireq->ireq_state = TCP_NEW_SYN_RECV;
-
- /* Following is temporary. It is coupled with debugging
- * helpers in reqsk_put() & reqsk_free()
- */
- atomic_set(&ireq->ireq_refcnt, 0);
- }
-
- return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener);
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 56dc2faba47e..3fa4f824900a 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -52,6 +52,7 @@ struct request_sock {
#define rsk_refcnt __req_common.skc_refcnt
struct request_sock *dl_next;
+ struct sock *rsk_listener;
u16 mss;
u8 num_retrans; /* number of retransmits */
u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
@@ -67,13 +68,21 @@ struct request_sock {
u32 peer_secid;
};
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
- if (req != NULL)
+ if (req) {
req->rsk_ops = ops;
-
+ sock_hold(sk_listener);
+ req->rsk_listener = sk_listener;
+
+ /* Following is temporary. It is coupled with debugging
+ * helpers in reqsk_put() & reqsk_free()
+ */
+ atomic_set(&req->rsk_refcnt, 0);
+ }
return req;
}
@@ -88,6 +97,8 @@ static inline void reqsk_free(struct request_sock *req)
WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
req->rsk_ops->destructor(req);
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
kmem_cache_free(req->rsk_ops->slab, req);
}
@@ -286,6 +297,12 @@ static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
req->sk = NULL;
req->dl_next = lopt->syn_table[hash];
+ /* before letting lookups find us, make sure all req fields
+ * are committed to memory and refcnt initialized.
+ */
+ smp_wmb();
+ atomic_set(&req->rsk_refcnt, 1);
+
write_lock(&queue->syn_wait_lock);
lopt->syn_table[hash] = req;
write_unlock(&queue->syn_wait_lock);
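[Editor's note] The hunk above is the heart of the ordering change: the
refcount publication moves from accept-queue time (removed from
inet_csk_reqsk_queue_add() earlier in this merge) to hash-insertion time.
A condensed sketch of the publish-after-init pattern, using the same names
as the hunk:

/* A lockless lookup that finds req in the hash table must never see a
 * half-initialised object: write every field first, order those stores
 * with a write barrier, and only then let the refcount become non-zero
 * and the object reachable.
 */
req->dl_next = lopt->syn_table[hash];	/* 1. last field initialised    */
smp_wmb();				/* 2. order init before publish */
atomic_set(&req->rsk_refcnt, 1);	/* 3. lookups may now take refs */
lopt->syn_table[hash] = req;		/* 4. object becomes reachable  */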