author    Miroslav Urbanek <mu@miroslavurbanek.com>	2016-11-21 15:48:21 +0100
committer Steffen Klassert <steffen.klassert@secunet.com>	2016-11-23 06:37:09 +0100
commit    6b226487815574193c1da864f2eac274781a2b0c (patch)
tree      ab9132564a33ee87fb6fc5f810b70531e9445ab5 /net/core
parent    330e832abda923df06a4ca6d3faac6e9c1b42548 (diff)
flowcache: Increase threshold for refusing new allocations
The threshold for OOM protection is too small for systems with a large
number of CPUs. Applications report ENOBUFS on connect() every 10 minutes.

The problem is that the variable net->xfrm.flow_cache_gc_count is a global
counter while the variable fc->high_watermark is a per-CPU constant. Take
the number of CPUs into account as well.

Fixes: 6ad3122a08e3 ("flowcache: Avoid OOM condition under preasure")
Reported-by: Lukáš Koldrt <lk@excello.cz>
Tested-by: Jan Hejl <jh@excello.cz>
Signed-off-by: Miroslav Urbanek <mu@miroslavurbanek.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
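A rough userspace sketch of the arithmetic behind the change (not part of the
patch; the constants are hypothetical, and high_watermark and ncpus merely
stand in for fc->high_watermark and num_online_cpus()):

/*
 * Illustrative sketch only: a global gc_count compared against a single
 * per-CPU high_watermark trips too early on machines with many CPUs;
 * scaling the limit by the CPU count avoids the false ENOBUFS.
 * The values below are hypothetical, not taken from the kernel.
 */
#include <stdio.h>

int main(void)
{
	unsigned int high_watermark = 4096;   /* per-CPU constant (hypothetical) */
	unsigned int ncpus = 64;              /* stands in for num_online_cpus() */
	unsigned int gc_count = ncpus * 300;  /* global backlog: modest amount per CPU */

	unsigned int old_limit = high_watermark;               /* pre-patch check */
	unsigned int new_limit = 2 * ncpus * high_watermark;   /* post-patch check */

	printf("old rule: gc_count %u > %u ? %s\n", gc_count, old_limit,
	       gc_count > old_limit ? "ENOBUFS" : "ok");
	printf("new rule: gc_count %u > %u ? %s\n", gc_count, new_limit,
	       gc_count > new_limit ? "ENOBUFS" : "ok");
	return 0;
}

With 64 CPUs even a small per-CPU backlog exceeds one per-CPU watermark,
matching the periodic ENOBUFS reports; scaling the limit by the number of
online CPUs keeps the OOM protection without the false positives.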
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/flow.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b68d5b..18e8893d4be5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work)
 	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
 		atomic_dec(&xfrm->flow_cache_gc_count);
-		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
 	}
 }
@@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	if (fcp->hash_count > fc->high_watermark)
 		flow_cache_shrink(fc, fcp);
 
-	if (fcp->hash_count > 2 * fc->high_watermark ||
-	    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
-		atomic_inc(&net->xfrm.flow_cache_genid);
+	if (atomic_read(&net->xfrm.flow_cache_gc_count) >
+	    2 * num_online_cpus() * fc->high_watermark) {
 		flo = ERR_PTR(-ENOBUFS);
 		goto ret_object;
 	}