author:    Al Viro <viro@zeniv.linux.org.uk>    2011-12-12 22:53:00 -0500
committer: Al Viro <viro@zeniv.linux.org.uk>    2012-01-03 22:52:39 -0500
commit:    a5166169f9b920cae3c503910cb66a3ac5dd846d
tree:      ba62f9f2227c4eff9ad6d473f674f72443fba0fb /fs/super.c
parent:    5352d3b65ae6f38e71e16f704414c1db4b4f7228
vfs: convert fs_supers to hlist
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
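The commit message is terse, so for orientation: `s_instances` links each `struct super_block` onto its `struct file_system_type`'s `fs_supers` list, and this patch switches that linkage from a `list_head` pair to an `hlist_head`/`hlist_node` pair. A minimal sketch of the two layouts (simplified from the kernel's definitions, for illustration only) shows what the change buys: the hlist head is a single pointer, and a node that has been removed and re-initialised can be recognised from the node alone through its `pprev` back-pointer, which is what the `hlist_unhashed()` checks in the diff below rely on.

```c
/* Simplified layouts, for illustration only -- the real definitions live in
 * include/linux/types.h and include/linux/list.h. */
struct list_head {                       /* circular, doubly linked */
	struct list_head *next, *prev;   /* head and node have the same shape */
};

struct hlist_head {                      /* head: a single pointer */
	struct hlist_node *first;
};

struct hlist_node {                      /* node: two pointers */
	struct hlist_node *next;
	struct hlist_node **pprev;       /* points at the previous ->next (or ->first) */
};

/* Whether a node is on a list can be read off the node itself: */
static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}
```

The old code got the same answer from `list_empty(&sb->s_instances)`, because `list_del_init()` leaves the node pointing at itself; the hlist form asks the same "is this superblock still hashed on its type's list?" question more directly and halves the size of each `fs_supers` head.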
Diffstat (limited to 'fs/super.c')
-rw-r--r-- | fs/super.c | 26
1 file changed, 14 insertions(+), 12 deletions(-)
```diff
diff --git a/fs/super.c b/fs/super.c
index 66a12f9bfc20..bab11bad13ba 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -136,7 +136,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 	INIT_LIST_HEAD(&s->s_files);
 #endif
 	s->s_bdi = &default_backing_dev_info;
-	INIT_LIST_HEAD(&s->s_instances);
+	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
 	INIT_LIST_HEAD(&s->s_dentry_lru);
@@ -328,7 +328,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 bool grab_super_passive(struct super_block *sb)
 {
 	spin_lock(&sb_lock);
-	if (list_empty(&sb->s_instances)) {
+	if (hlist_unhashed(&sb->s_instances)) {
 		spin_unlock(&sb_lock);
 		return false;
 	}
@@ -400,7 +400,7 @@ void generic_shutdown_super(struct super_block *sb)
 	}
 	spin_lock(&sb_lock);
 	/* should be initialized for __put_super_and_need_restart() */
-	list_del_init(&sb->s_instances);
+	hlist_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
 	up_write(&sb->s_umount);
 }
@@ -420,13 +420,14 @@ struct super_block *sget(struct file_system_type *type,
 			void *data)
 {
 	struct super_block *s = NULL;
+	struct hlist_node *node;
 	struct super_block *old;
 	int err;
 
 retry:
 	spin_lock(&sb_lock);
 	if (test) {
-		list_for_each_entry(old, &type->fs_supers, s_instances) {
+		hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
 			if (!test(old, data))
 				continue;
 			if (!grab_super(old))
@@ -462,7 +463,7 @@ retry:
 	s->s_type = type;
 	strlcpy(s->s_id, type->name, sizeof(s->s_id));
 	list_add_tail(&s->s_list, &super_blocks);
-	list_add(&s->s_instances, &type->fs_supers);
+	hlist_add_head(&s->s_instances, &type->fs_supers);
 	spin_unlock(&sb_lock);
 	get_filesystem(type);
 	register_shrinker(&s->s_shrink);
@@ -497,7 +498,7 @@ void sync_supers(void)
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_op->write_super && sb->s_dirt) {
 			sb->s_count++;
@@ -533,7 +534,7 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
@@ -566,9 +567,10 @@ void iterate_supers_type(struct file_system_type *type,
 			void (*f)(struct super_block *, void *), void *arg)
 {
 	struct super_block *sb, *p = NULL;
+	struct hlist_node *node;
 
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &type->fs_supers, s_instances) {
+	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
@@ -607,7 +609,7 @@ struct super_block *get_super(struct block_device *bdev)
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_bdev == bdev) {
 			sb->s_count++;
@@ -647,7 +649,7 @@ struct super_block *get_active_super(struct block_device *bdev)
 restart:
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_bdev == bdev) {
 			if (grab_super(sb)) /* drops sb_lock */
@@ -667,7 +669,7 @@ struct super_block *user_get_super(dev_t dev)
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_dev == dev) {
 			sb->s_count++;
@@ -756,7 +758,7 @@ static void do_emergency_remount(struct work_struct *work)
 
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
+		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
```
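To show the call-site pattern the diff establishes, here is a small self-contained userspace model. The structures and helpers below are simplified re-implementations written for this example, and the toy `fs_type`/`super_block` structs and `main()` are illustrative only, not kernel code: superblocks are added with `hlist_add_head()` (as `sget()` now does), detached with `hlist_del_init()` (as `generic_shutdown_super()` now does), and iterators skip entries whose `s_instances` node reports `hlist_unhashed()`.

```c
#include <stdio.h>
#include <stddef.h>

/* Minimal userspace re-implementation of the hlist primitives used by the
 * patch (modelled on include/linux/list.h, simplified for demonstration). */
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define HLIST_HEAD_INIT { .first = NULL }

static void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; }
static int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; }

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	*n->pprev = n->next;             /* unlink from the chain */
	if (n->next)
		n->next->pprev = n->pprev;
	INIT_HLIST_NODE(n);              /* leave the node marked "unhashed" */
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for struct file_system_type / struct super_block. */
struct fs_type     { struct hlist_head fs_supers; };
struct super_block { int id; struct hlist_node s_instances; };

int main(void)
{
	struct fs_type type = { .fs_supers = HLIST_HEAD_INIT };
	struct super_block a = { .id = 1 }, b = { .id = 2 };

	INIT_HLIST_NODE(&a.s_instances);                  /* as in alloc_super() */
	INIT_HLIST_NODE(&b.s_instances);
	hlist_add_head(&a.s_instances, &type.fs_supers);  /* as in sget() */
	hlist_add_head(&b.s_instances, &type.fs_supers);
	hlist_del_init(&a.s_instances);                   /* as in generic_shutdown_super() */

	/* Call sites now ask the node, not the head, whether the sb is live,
	 * mirroring the list_empty() -> hlist_unhashed() substitution above. */
	printf("a live: %d\n", !hlist_unhashed(&a.s_instances)); /* prints 0 */
	printf("b live: %d\n", !hlist_unhashed(&b.s_instances)); /* prints 1 */

	/* Walk fs_supers the way the patched sget()/iterate_supers_type() do. */
	for (struct hlist_node *node = type.fs_supers.first; node; node = node->next) {
		struct super_block *sb = container_of(node, struct super_block, s_instances);
		printf("on fs_supers: sb %d\n", sb->id);
	}
	return 0;
}
```

The extra `struct hlist_node *node;` declarations the patch adds to `sget()` and `iterate_supers_type()` exist because, in this kernel version, `hlist_for_each_entry()` still required an explicit node cursor argument; later kernels dropped that parameter.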