#ifndef __NETNS_XFRM_H
#define __NETNS_XFRM_H

#include <linux/list.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
#include <net/flowcache.h>

struct ctl_table_header;
struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

struct netns_xfrm {
	struct list_head	state_all;
	/*
	 * Hash table to find appropriate SA towards given target (endpoint of
	 * tunnel or destination of transport mode) allowed by selector.
	 *
	 * Main use is finding SA after policy selected tunnel or transport
	 * mode. Also, it can be used by ah/esp icmp error handler to find
	 * offending SA.
	 */
	struct hlist_head	*state_bydst;
	struct hlist_head	*state_bysrc;
	struct hlist_head	*state_byspi;
	unsigned int		state_hmask;	/* bucket mask for the three state hash tables */
	unsigned int		state_num;
	struct work_struct	state_hash_work;	/* deferred resize of the state hash tables */
	struct hlist_head	state_gc_list;
	struct work_struct	state_gc_work;

	struct list_head	policy_all;
	struct hlist_head	*policy_byidx;
	unsigned int		policy_idx_hmask;
	struct hlist_head	policy_inexact[XFRM_POLICY_MAX * 2];
	struct xfrm_policy_hash	policy_bydst[XFRM_POLICY_MAX * 2];
	unsigned int		policy_count[XFRM_POLICY_MAX * 2];
	struct work_struct	policy_hash_work;

	/* netlink (NETLINK_XFRM) socket used for userspace configuration */
	struct sock		*nlsk;
	struct sock		*nlsk_stash;

	u32			sysctl_aevent_etime;	/* replay async event timeout */
	u32			sysctl_aevent_rseqth;	/* replay sequence threshold for async events */
	int			sysctl_larval_drop;	/* drop packets waiting on larval (ACQUIRE) states */
	u32			sysctl_acq_expires;	/* lifetime of ACQUIRE states, in seconds */
#ifdef CONFIG_SYSCTL
	struct ctl_table_header	*sysctl_hdr;
#endif

	struct dst_ops		xfrm4_dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	struct dst_ops		xfrm6_dst_ops;
#endif
	spinlock_t		xfrm_state_lock;	/* protects the state lists and hash tables */
	spinlock_t		xfrm_policy_sk_bundle_lock;
	rwlock_t		xfrm_policy_lock;	/* protects the policy lists and hash tables */
	struct mutex		xfrm_cfg_mutex;

	/* flow cache part */
	struct flow_cache	flow_cache_global;
	struct kmem_cache	*flow_cachep;
	atomic_t		flow_cache_genid;
	struct list_head	flow_cache_gc_list;
	spinlock_t		flow_cache_gc_lock;
	struct work_struct	flow_cache_gc_work;
	struct work_struct	flow_cache_flush_work;
	struct mutex		flow_flush_sem;
};
#endif
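
/*
 * Illustrative sketch (not part of the original header): the xfrm state and
 * policy hash tables above follow the usual "array of buckets plus
 * power-of-two mask" pattern, e.g. xfrm_policy_hash::table indexed by a hash
 * value ANDed with xfrm_policy_hash::hmask, and state_bydst/bysrc/byspi
 * indexed with state_hmask.  The standalone user-space example below shows
 * that pattern with hypothetical names (toy_table, toy_hash, toy_insert,
 * toy_lookup); the real kernel code uses hlist_head chains and jhash-based
 * hash functions instead.
 */
#if 0	/* illustrative example only, not compiled as part of the header */
#include <stdio.h>
#include <stdlib.h>

struct toy_node {
	unsigned int		key;
	struct toy_node		*next;
};

struct toy_table {
	struct toy_node		**buckets;	/* analogous to xfrm_policy_hash::table */
	unsigned int		hmask;		/* bucket count minus one, a power of two */
};

/* Multiply-and-mask bucket selection, the same shape as the
 * state/policy lookups that use the *_hmask fields above. */
static unsigned int toy_hash(unsigned int key, unsigned int hmask)
{
	return (key * 2654435761u) & hmask;
}

static void toy_insert(struct toy_table *t, struct toy_node *n)
{
	unsigned int h = toy_hash(n->key, t->hmask);

	n->next = t->buckets[h];
	t->buckets[h] = n;
}

static struct toy_node *toy_lookup(struct toy_table *t, unsigned int key)
{
	struct toy_node *n;

	for (n = t->buckets[toy_hash(key, t->hmask)]; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	struct toy_table t = {
		.buckets = calloc(16, sizeof(*t.buckets)),
		.hmask = 16 - 1,
	};
	struct toy_node n = { .key = 42 };

	toy_insert(&t, &n);
	printf("found key %u\n", toy_lookup(&t, 42)->key);
	free(t.buckets);
	return 0;
}
#endif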