From 16b8a4761cbe5082cd35641c066d7c4b6b83cdca Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 22 Jun 2010 10:22:17 -0700
Subject: net: Introduce u64_stats_sync infrastructure

To properly implement 64bit network statistics on 32bit or 64bit
hosts, we provide one new type and four methods to ease conversions.

A stats producer should use the following template, provided it
already has exclusive access to the counters
(include/linux/u64_stats_sync.h contains some documentation about
the details):

    u64_stats_update_begin(&stats->syncp);
    stats->bytes64 += len;
    stats->packets64++;
    u64_stats_update_end(&stats->syncp);

while a consumer should use the following template to get a
consistent snapshot:

    u64 tbytes, tpackets;
    unsigned int start;

    do {
        start = u64_stats_fetch_begin(&stats->syncp);
        tbytes = stats->bytes64;
        tpackets = stats->packets64;
    } while (u64_stats_fetch_retry(&stats->syncp, start));

Suggested by David Miller, and comments courtesy of Nick Piggin.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/linux/u64_stats_sync.h | 107 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 include/linux/u64_stats_sync.h

diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 000000000000..d0505156ed52
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64bits network statistics on 32bit and 64bit hosts,
+ * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ *
+ * Key points :
+ * 1) Use a seqcount on SMP 32bits, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ *    be lost, thus blocking readers forever.
+ *    If this synchronization point is not a mutex, but a spinlock or
+ *    spinlock_bh() or disable_bh() :
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If reader fetches several counters, there is no guarantee the whole values
+ *    are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+ *
+ * 5) readers are allowed to sleep or be preempted/interrupted : They perform
+ *    pure reads. But if they have to fetch many values, it's better to not allow
+ *    preemptions/interruptions to avoid many retries.
+ *
+ * Usage :
+ *
+ * Stats producer (writer) should use following template granted it already got
+ * an exclusive access to counters (a lock is already taken, or per cpu
+ * data is used [in a non preemptable context])
+ *
+ * spin_lock_bh(...) or other synchronization to get exclusive access
+ * ...
+ * u64_stats_update_begin(&stats->syncp);
+ * stats->bytes64 += len; // non atomic operation
+ * stats->packets64++;    // non atomic operation
+ * u64_stats_update_end(&stats->syncp);
+ *
+ * While a consumer (reader) should use following template to get consistent
+ * snapshot for each variable (but no guarantee on several ones)
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ *         start = u64_stats_fetch_begin(&stats->syncp);
+ *         tbytes = stats->bytes64; // non atomic operation
+ *         tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->lock, syncp));
+ *
+ *
+ * Example of use in drivers/net/loopback.c, using per_cpu containers,
+ * in BH disabled context.
+ */
+#include <linux/seqlock.h>
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+struct u64_stats_sync {
+	seqcount_t seq;
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+	write_seqcount_begin(&syncp->seq);
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+	write_seqcount_end(&syncp->seq);
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return read_seqcount_begin(&syncp->seq);
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+	return read_seqcount_retry(&syncp->seq, start);
+}
+
+#else
+struct u64_stats_sync {
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return 0;
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+	return false;
+}
+#endif
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
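For readers coming to this API fresh, the following sketch shows how a
driver might wire the new type into its own stats structure, combining
the producer and consumer templates above. It is an illustration only:
the names mydev_stats, mydev_update_stats and mydev_read_stats are
hypothetical, not part of the patch.

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct mydev_stats {
	u64			bytes64;
	u64			packets64;
	struct u64_stats_sync	syncp;
	spinlock_t		lock;	/* writer-side exclusion (key point 3) */
};

/* Producer: the spinlock guarantees a seqcount update cannot be lost. */
static void mydev_update_stats(struct mydev_stats *stats, unsigned int len)
{
	spin_lock_bh(&stats->lock);
	u64_stats_update_begin(&stats->syncp);
	stats->bytes64 += len;		/* non atomic operation */
	stats->packets64++;		/* non atomic operation */
	u64_stats_update_end(&stats->syncp);
	spin_unlock_bh(&stats->lock);
}

/*
 * Consumer: lockless; retries the snapshot if a writer raced with us.
 * The retry is only possible on 32bit SMP, a noop everywhere else.
 */
static void mydev_read_stats(const struct mydev_stats *stats,
			     u64 *tbytes, u64 *tpackets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*tbytes = stats->bytes64;	/* non atomic operation */
		*tpackets = stats->packets64;	/* non atomic operation */
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}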
From b6b3ecc71a0664d44ed8d087d583aee98fbf492a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 24 Jun 2010 00:04:38 +0000
Subject: net: u64_stats_sync improvements

- Add a comment about interrupts:

  6) If counter might be written by an interrupt, readers should
     block interrupts.

- Fix a typo in sample of use.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/linux/u64_stats_sync.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index d0505156ed52..b38e3a58de83 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -23,6 +23,10 @@
  * pure reads. But if they have to fetch many values, it's better to not allow
  * preemptions/interruptions to avoid many retries.
  *
+ * 6) If counter might be written by an interrupt, readers should block interrupts.
+ *    (On UP, there is no seqcount_t protection, a reader allowing interrupts could
+ *    read partial values)
+ *
  * Usage :
  *
  * Stats producer (writer) should use following template granted it already got
@@ -46,7 +50,7 @@
  *         start = u64_stats_fetch_begin(&stats->syncp);
  *         tbytes = stats->bytes64; // non atomic operation
  *         tpackets = stats->packets64; // non atomic operation
- * } while (u64_stats_fetch_retry(&stats->lock, syncp));
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
  *
  *
  * Example of use in drivers/net/loopback.c, using per_cpu containers,
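To make the new point 6 concrete, here is a hedged sketch of a reader
for a counter that an interrupt handler may update; it reuses the
hypothetical mydev_stats structure from the earlier sketch. Blocking
local interrupts keeps the irq writer off this CPU for the duration of
the snapshot, which matters on 32bit UP where no seqcount would detect
a torn read.

#include <linux/irqflags.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

static u64 mydev_read_irq_counter(const struct mydev_stats *stats)
{
	unsigned long flags;
	unsigned int start;
	u64 bytes;

	local_irq_save(flags);	/* writer may run from an interrupt (point 6) */
	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes64;	/* could be torn on 32bit UP otherwise */
	} while (u64_stats_fetch_retry(&stats->syncp, start));
	local_irq_restore(flags);

	return bytes;
}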
From 33d91f00c73ba0012bce18c1690cb8313ca7adaa Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 24 Jun 2010 00:54:06 +0000
Subject: net: u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh()

- Must disable preemption in case of 32bit UP in u64_stats_fetch_begin()
  and u64_stats_fetch_retry()

- Add new u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh() for
  network usage, disabling BH on 32bit UP only.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/linux/u64_stats_sync.h | 59 +++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 15 deletions(-)

diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index b38e3a58de83..fa261a0da280 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,6 +27,9 @@
  *    (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  *    read partial values)
  *
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ *    u64_stats_fetch_retry_bh() helpers
+ *
  * Usage :
  *
  * Stats producer (writer) should use following template granted it already got
@@ -58,54 +61,80 @@
  */
 #include <linux/seqlock.h>
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	seqcount_t seq;
+#endif
 };
 
 static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
+#endif
 }
 
 static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
+#endif
 }
 
 static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	preempt_disable();
+#endif
+	return 0;
+#endif
 }
 
 static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
-}
-
 #else
-struct u64_stats_sync {
-};
-
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
-{
-}
-
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
-{
+#if BITS_PER_LONG==32
+	preempt_enable();
+#endif
+	return false;
+#endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+	local_bh_disable();
+#endif
 	return 0;
+#endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+	local_bh_enable();
+#endif
 	return false;
-}
 #endif
+}
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
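As a closing illustration of the new *_bh helpers, here is a sketch of
a reader folding per-cpu counters whose writers run in softirq context,
loosely modeled on the drivers/net/loopback.c usage the header comment
points to. mydev_fold_stats is hypothetical and the pcpu_lstats layout
is paraphrased rather than copied from the driver.

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct pcpu_lstats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static void mydev_fold_stats(struct pcpu_lstats __percpu *lstats,
			     u64 *tbytes, u64 *tpackets)
{
	int i;

	*tbytes = 0;
	*tpackets = 0;
	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats = per_cpu_ptr(lstats, i);
		unsigned int start;
		u64 tb, tp;

		do {
			/* on 32bit UP this disables BH around the snapshot */
			start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
			tb = lb_stats->bytes;
			tp = lb_stats->packets;
		} while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));

		*tbytes += tb;
		*tpackets += tp;
	}
}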