author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/percpu_counter.h
tag		v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--	include/linux/percpu_counter.h	107
1 files changed, 107 insertions(+), 0 deletions(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
new file mode 100644
index 000000000000..bd6708e2c027
--- /dev/null
+++ b/include/linux/percpu_counter.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_PERCPU_COUNTER_H
+#define _LINUX_PERCPU_COUNTER_H
+/*
+ * A simple "approximate counter" for use in ext2 and ext3 superblocks.
+ *
+ * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_SMP
+
+struct percpu_counter {
+	spinlock_t lock;
+	long count;
+	long *counters;
+};
+
+#if NR_CPUS >= 16
+#define FBC_BATCH	(NR_CPUS*2)
+#else
+#define FBC_BATCH	(NR_CPUS*4)
+#endif
+
+static inline void percpu_counter_init(struct percpu_counter *fbc)
+{
+	spin_lock_init(&fbc->lock);
+	fbc->count = 0;
+	fbc->counters = alloc_percpu(long);
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+	free_percpu(fbc->counters);
+}
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+
+static inline long percpu_counter_read(struct percpu_counter *fbc)
+{
+	return fbc->count;
+}
+
+/*
+ * It is possible for the percpu_counter_read() to return a small negative
+ * number for some counter which should never be negative.
+ */
+static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+	long ret = fbc->count;
+
+	barrier();		/* Prevent reloads of fbc->count */
+	if (ret > 0)
+		return ret;
+	return 1;
+}
+
+#else
+
+struct percpu_counter {
+	long count;
+};
+
+static inline void percpu_counter_init(struct percpu_counter *fbc)
+{
+	fbc->count = 0;
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline void
+percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	preempt_disable();
+	fbc->count += amount;
+	preempt_enable();
+}
+
+static inline long percpu_counter_read(struct percpu_counter *fbc)
+{
+	return fbc->count;
+}
+
+static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+	return fbc->count;
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void percpu_counter_inc(struct percpu_counter *fbc)
+{
+	percpu_counter_mod(fbc, 1);
+}
+
+static inline void percpu_counter_dec(struct percpu_counter *fbc)
+{
+	percpu_counter_mod(fbc, -1);
+}
+
+#endif /* _LINUX_PERCPU_COUNTER_H */
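
Note that the SMP half of the header only declares percpu_counter_mod(); its out-of-line
definition lives elsewhere in the same tree and is not part of this diff. As a rough sketch
of the batching scheme that FBC_BATCH implies (an assumption about that out-of-line code,
not something this header shows): each CPU accumulates deltas in its own slot and folds them
into the shared count, under the spinlock, only once the local total reaches the batch size.

	void percpu_counter_mod(struct percpu_counter *fbc, long amount)
	{
		long count;
		long *pcount;
		int cpu = get_cpu();	/* disables preemption, pins us to one CPU */

		pcount = per_cpu_ptr(fbc->counters, cpu);
		count = *pcount + amount;
		if (count >= FBC_BATCH || count <= -FBC_BATCH) {
			/* Local batch is full: fold it into the shared count. */
			spin_lock(&fbc->lock);
			fbc->count += count;
			*pcount = 0;
			spin_unlock(&fbc->lock);
		} else {
			/* Common case: stay CPU-local; no lock, no cacheline bouncing. */
			*pcount = count;
		}
		put_cpu();
	}

This is also why percpu_counter_read() is only approximate: up to roughly
NR_CPUS * FBC_BATCH events may still be sitting unfolded in per-CPU slots,
which is how the read can even go transiently negative.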
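
And a minimal usage sketch, likewise not from this commit: how an ext2/ext3-style
superblock might keep an approximate free-blocks count with this API. The counter
name, function names, and numbers below are all hypothetical.

	#include <linux/percpu_counter.h>

	static struct percpu_counter free_blocks;	/* hypothetical example counter */

	static void example_fill_super(void)
	{
		percpu_counter_init(&free_blocks);
		percpu_counter_mod(&free_blocks, 100000);	/* blocks free at mount time */
	}

	static void example_alloc_block(void)
	{
		percpu_counter_dec(&free_blocks);	/* one block handed out */
	}

	static long example_statfs_free(void)
	{
		/*
		 * Approximate; on SMP the read is clamped so a transiently
		 * negative sum is never reported. Fine for statfs-style
		 * reporting, not for exact accounting decisions.
		 */
		return percpu_counter_read_positive(&free_blocks);
	}

	static void example_put_super(void)
	{
		percpu_counter_destroy(&free_blocks);	/* frees the per-CPU slots on SMP */
	}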