author | Theodore Ts'o <tytso@mit.edu> | 2013-03-02 10:27:46 -0500
---|---|---
committer | Theodore Ts'o <tytso@mit.edu> | 2013-03-02 10:27:46 -0500
commit | 1ac6466f253ef7bd063b7877fb056afe1820841c (patch) |
tree | 8202e2ba849b72236ea186cfbfd4c4098aa7d699 /fs |
parent | 246307745c406379996e6ed6411f0e20f1ce1449 (diff) |
ext4: use percpu counter for extent cache count
Use a percpu counter rather than atomic types for shrinker accounting.
There's no need for ultimate accuracy in the shrinker, so this
should come a little more cheaply. The percpu struct is somewhat
large, but there was a big gap before the cache-aligned
s_es_lru_lock anyway, and it fits nicely in there.
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/ext4/ext4.h | 2
-rw-r--r-- | fs/ext4/extents_status.c | 8
-rw-r--r-- | fs/ext4/super.c | 5
3 files changed, 10 insertions, 5 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 96c10934bb96..4a01ba315262 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1268,7 +1268,6 @@ struct ext4_sb_info {
         atomic_t s_mb_preallocated;
         atomic_t s_mb_discarded;
         atomic_t s_lock_busy;
-        atomic_t s_extent_cache_cnt;
 
         /* locality groups */
         struct ext4_locality_group __percpu *s_locality_groups;
@@ -1310,6 +1309,7 @@ struct ext4_sb_info {
         /* Reclaim extents from extent status tree */
         struct shrinker s_es_shrinker;
         struct list_head s_es_lru;
+        struct percpu_counter s_extent_cache_cnt;
         spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 };
 
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 27fcdd2b2607..95796a1b7522 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -305,7 +305,7 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
          */
         if (!ext4_es_is_delayed(es)) {
                 EXT4_I(inode)->i_es_lru_nr++;
-                atomic_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+                percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
         }
 
         return es;
@@ -317,7 +317,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
         if (!ext4_es_is_delayed(es)) {
                 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
                 EXT4_I(inode)->i_es_lru_nr--;
-                atomic_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+                percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
         }
 
         kmem_cache_free(ext4_es_cachep, es);
@@ -678,7 +678,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
         int nr_to_scan = sc->nr_to_scan;
         int ret, nr_shrunk = 0;
 
-        ret = atomic_read(&sbi->s_extent_cache_cnt);
+        ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
         trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
 
         if (!nr_to_scan)
@@ -711,7 +711,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
         list_splice_tail(&scanned, &sbi->s_es_lru);
         spin_unlock(&sbi->s_es_lru_lock);
 
-        ret = atomic_read(&sbi->s_extent_cache_cnt);
+        ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
         trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
         return ret;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 373d46cd5d3f..1ae5860b30a3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -783,6 +783,7 @@ static void ext4_put_super(struct super_block *sb)
         percpu_counter_destroy(&sbi->s_freeinodes_counter);
         percpu_counter_destroy(&sbi->s_dirs_counter);
         percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
         brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
         for (i = 0; i < MAXQUOTAS; i++)
@@ -3688,6 +3689,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         if (!err) {
                 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
         }
+        if (!err) {
+                err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
+        }
         if (err) {
                 ext4_msg(sb, KERN_ERR, "insufficient memory");
                 goto failed_mount3;
@@ -3993,6 +3997,7 @@ failed_mount3:
         percpu_counter_destroy(&sbi->s_freeinodes_counter);
         percpu_counter_destroy(&sbi->s_dirs_counter);
         percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
         if (sbi->s_mmp_tsk)
                 kthread_stop(sbi->s_mmp_tsk);
 failed_mount2: