| author | Ingo Molnar <mingo@elte.hu> | 2012-03-13 16:26:52 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2012-03-13 16:26:52 +0100 |
| commit | 47258cf3c4aa5d56e678bafe0dd0d03ddd980b88 (patch) | |
| tree | 4856f0fb1185ba97f320a7ed6fb63bf136708a42 /include/net | |
| parent | c308b56b5398779cd3da0f62ab26b0453494c3d4 (diff) | |
| parent | fde7d9049e55ab85a390be7f415d74c9f62dd0f9 (diff) | |
Merge tag 'v3.3-rc7' into sched/core
Merge reason: merge back final fixes, prepare for the merge window.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/net')
| -rw-r--r-- | include/net/inetpeer.h | 4 |
| -rw-r--r-- | include/net/tcp.h | 5 |
2 files changed, 6 insertions, 3 deletions
```diff
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 06b795dd5906..b94765e38e80 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,12 +35,12 @@ struct inet_peer {
 
 	u32			metrics[RTAX_MAX];
 	u32			rate_tokens;	/* rate limiting for ICMP */
-	int			redirect_genid;
 	unsigned long		rate_last;
 	unsigned long		pmtu_expires;
 	u32			pmtu_orig;
 	u32			pmtu_learned;
 	struct inetpeer_addr_base redirect_learned;
+	struct list_head	gc_list;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
 	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
@@ -96,6 +96,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
 extern void inet_putpeer(struct inet_peer *p);
 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
+extern void inetpeer_invalidate_tree(int family);
+
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
  * tcp_ts_stamp if no refcount is taken on inet_peer
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 42c29bfbcee3..2d80c291fffb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1364,8 +1364,9 @@ static inline void tcp_push_pending_frames(struct sock *sk)
 	}
 }
 
-/* Start sequence of the highest skb with SACKed bit, valid only if
- * sacked > 0 or when the caller has ensured validity by itself.
+/* Start sequence of the skb just after the highest skb with SACKed
+ * bit, valid only if sacked_out > 0 or when the caller has ensured
+ * validity by itself.
  */
 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
 {
```
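
The inetpeer.h hunks add a `gc_list` member and export a new `inetpeer_invalidate_tree(int family)` entry point next to the existing lookup and rate-limit helpers. A minimal caller sketch built only from the declarations visible in this diff; the `inet_getpeer_v4(addr, create)` signature and both `example_*` helpers are assumptions for illustration, not part of this commit:

```c
#include <linux/socket.h>	/* AF_INET */
#include <linux/types.h>
#include <net/inetpeer.h>

/* Hypothetical helper: ask the peer cache whether an ICMP reply to
 * daddr is currently allowed by the per-peer rate limit.
 */
static bool example_icmp_allowed(__be32 daddr, int timeout)
{
	struct inet_peer *peer;
	bool allow = true;

	peer = inet_getpeer_v4(daddr, 1);	/* assumed (addr, create) signature */
	if (peer) {
		allow = inet_peer_xrlim_allow(peer, timeout);
		inet_putpeer(peer);
	}
	return allow;
}

/* Hypothetical helper: drop every cached IPv4 peer at once via the
 * new entry point, e.g. alongside a routing cache flush.
 */
static void example_flush_v4_peers(void)
{
	inetpeer_invalidate_tree(AF_INET);
}
```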
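
The tcp.h hunk changes only the comment, but the corrected wording makes the precondition explicit: the value of `tcp_highest_sack_seq()` is meaningful only while `tp->sacked_out` is non-zero. A hypothetical caller sketch illustrating that check; the helper below is illustrative and not taken from this tree:

```c
#include <net/tcp.h>

/* Hypothetical helper: is seq below the start of the skb just after
 * the highest SACKed skb?  Per the corrected comment, the result of
 * tcp_highest_sack_seq() is only valid while sacked_out > 0, so bail
 * out first when nothing is SACKed.
 */
static bool example_seq_below_highest_sack(struct sock *sk, u32 seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out)
		return false;

	return before(seq, tcp_highest_sack_seq(tp));
}
```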
