author    Julian Anastasov <ja@ssi.bg>    2013-03-22 11:46:51 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>    2013-04-02 00:23:56 +0200
commit    ba3a3ce14ea26d602b253ef13a56d540827cd51d (patch)
tree      dad0fd534484fc25b8215a23021cf45b0cdbe849 /net/netfilter/ipvs/ip_vs_lblcr.c
parent    ed3ffc4e48e2b03d5b23988f3cfa0ad8d79e0092 (diff)
ipvs: convert sched_lock to spin lock
As all read_locks are gone, spin lock is preferred.

Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
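For context, below is a minimal sketch of the conversion pattern this patch applies: once no caller takes the read side of an rwlock_t, every remaining critical section is exclusive, so a plain spinlock_t is the simpler and cheaper primitive. The struct and function names here are illustrative only, not the real IPVS declarations.

#include <linux/spinlock.h>

/* Before: reader/writer lock, only worthwhile while
 * read_lock() callers still exist. */
struct svc_before {
	rwlock_t sched_lock;
};

static void update_before(struct svc_before *s)
{
	write_lock_bh(&s->sched_lock);
	/* ... modify scheduler state ... */
	write_unlock_bh(&s->sched_lock);
}

/* After: with all readers gone, a plain spinlock gives the
 * same exclusion without rwlock overhead. */
struct svc_after {
	spinlock_t sched_lock;
};

static void update_after(struct svc_after *s)
{
	spin_lock_bh(&s->sched_lock);
	/* ... modify scheduler state ... */
	spin_unlock_bh(&s->sched_lock);
}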
Diffstat (limited to 'net/netfilter/ipvs/ip_vs_lblcr.c')
-rw-r--r--    net/netfilter/ipvs/ip_vs_lblcr.c    26
1 file changed, 13 insertions(+), 13 deletions(-)
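Note that the patch keeps two lock flavours, mirroring the write_lock_bh/write_lock split it replaces. A hedged sketch of the distinction, assuming (as the surrounding IPVS code suggests) that the flush path runs in process context while the expiry and scheduling paths already run in softirq context; names are illustrative:

#include <linux/spinlock.h>

struct demo_svc {
	spinlock_t sched_lock;
};

/* Process context: bottom halves must be disabled so a softirq
 * firing on this CPU cannot deadlock on the same lock. */
static void demo_flush(struct demo_svc *svc)
{
	spin_lock_bh(&svc->sched_lock);
	/* ... walk and free hash buckets ... */
	spin_unlock_bh(&svc->sched_lock);
}

/* Timer/softirq context: BHs are already disabled here, so the
 * plain variant is sufficient and slightly cheaper. */
static void demo_check_expire(struct demo_svc *svc)
{
	spin_lock(&svc->sched_lock);
	/* ... expire stale entries ... */
	spin_unlock(&svc->sched_lock);
}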
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 6049b85df41f..cdfe6a95eddb 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -368,7 +368,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
/*
* Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
- * IP address to a server. Called under write lock.
+ * IP address to a server. Called under spin lock.
*/
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
@@ -412,14 +412,14 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
- write_lock_bh(&svc->sched_lock);
+ spin_lock_bh(&svc->sched_lock);
tbl->dead = 1;
for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
}
- write_unlock_bh(&svc->sched_lock);
+ spin_unlock_bh(&svc->sched_lock);
}
static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
@@ -443,7 +443,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
- write_lock(&svc->sched_lock);
+ spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_after(en->lastuse +
sysctl_lblcr_expiration(svc), now))
@@ -452,7 +452,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
ip_vs_lblcr_free(en);
atomic_dec(&tbl->entries);
}
- write_unlock(&svc->sched_lock);
+ spin_unlock(&svc->sched_lock);
}
tbl->rover = j;
}
@@ -498,7 +498,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
- write_lock(&svc->sched_lock);
+ spin_lock(&svc->sched_lock);
hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
continue;
@@ -507,7 +507,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
atomic_dec(&tbl->entries);
goal--;
}
- write_unlock(&svc->sched_lock);
+ spin_unlock(&svc->sched_lock);
if (goal <= 0)
break;
}
@@ -678,7 +678,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
sysctl_lblcr_expiration(svc))) {
- write_lock(&svc->sched_lock);
+ spin_lock(&svc->sched_lock);
if (atomic_read(&en->set.size) > 1) {
struct ip_vs_dest *m;
@@ -686,7 +686,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
if (m)
ip_vs_dest_set_erase(&en->set, m);
}
- write_unlock(&svc->sched_lock);
+ spin_unlock(&svc->sched_lock);
}
/* If the destination is not overloaded, use it */
@@ -701,10 +701,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
/* Update our cache entry */
- write_lock(&svc->sched_lock);
+ spin_lock(&svc->sched_lock);
if (!tbl->dead)
ip_vs_dest_set_insert(&en->set, dest, true);
- write_unlock(&svc->sched_lock);
+ spin_unlock(&svc->sched_lock);
goto out;
}
@@ -716,10 +716,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
/* If we fail to create a cache entry, we'll just use the valid dest */
- write_lock(&svc->sched_lock);
+ spin_lock(&svc->sched_lock);
if (!tbl->dead)
ip_vs_lblcr_new(tbl, &iph.daddr, dest);
- write_unlock(&svc->sched_lock);
+ spin_unlock(&svc->sched_lock);
out:
IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",