path: root/drivers/md/dm-raid.c
author	Jonathan Brassow <jbrassow@redhat.com>	2013-05-08 17:57:13 -0500
committer	NeilBrown <neilb@suse.de>	2013-06-14 08:10:25 +1000
commit	f381e71b042af910fbe5f8222792cc5092750993 (patch)
tree	f63cc984c0fbae60cd4f4dbca3c7e77972981fec	/drivers/md/dm-raid.c
parent	9092c02d943515b3c9ffd5d0003527f8cc1dd77b (diff)
DM RAID: Break-up untidy function
Clean-up excessive indentation by moving some code in raid_resume()
into its own function.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/dm-raid.c')
-rw-r--r--	drivers/md/dm-raid.c	72
1 file changed, 39 insertions(+), 33 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index facaf9142d5a..59d15ec0ba81 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1572,15 +1572,51 @@ static void raid_postsuspend(struct dm_target *ti)
 	mddev_suspend(&rs->md);
 }
-static void raid_resume(struct dm_target *ti)
+static void attempt_restore_of_faulty_devices(struct raid_set *rs)
 {
 	int i;
 	uint64_t failed_devices, cleared_failed_devices = 0;
 	unsigned long flags;
 	struct dm_raid_superblock *sb;
-	struct raid_set *rs = ti->private;
 	struct md_rdev *r;
+	for (i = 0; i < rs->md.raid_disks; i++) {
+		r = &rs->dev[i].rdev;
+		if (test_bit(Faulty, &r->flags) && r->sb_page &&
+		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+			DMINFO("Faulty %s device #%d has readable super block."
+			       " Attempting to revive it.",
+			       rs->raid_type->name, i);
+			r->raid_disk = i;
+			r->saved_raid_disk = i;
+			flags = r->flags;
+			clear_bit(Faulty, &r->flags);
+			clear_bit(WriteErrorSeen, &r->flags);
+			clear_bit(In_sync, &r->flags);
+			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+				r->raid_disk = -1;
+				r->saved_raid_disk = -1;
+				r->flags = flags;
+			} else {
+				r->recovery_offset = 0;
+				cleared_failed_devices |= 1 << i;
+			}
+		}
+	}
+	if (cleared_failed_devices) {
+		rdev_for_each(r, &rs->md) {
+			sb = page_address(r->sb_page);
+			failed_devices = le64_to_cpu(sb->failed_devices);
+			failed_devices &= ~cleared_failed_devices;
+			sb->failed_devices = cpu_to_le64(failed_devices);
+		}
+	}
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
 	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
 	if (!rs->bitmap_loaded) {
 		bitmap_load(&rs->md);
@@ -1591,37 +1627,7 @@ static void raid_resume(struct dm_target *ti)
 		 * Take this opportunity to check whether any failed
 		 * devices are reachable again.
 		 */
-		for (i = 0; i < rs->md.raid_disks; i++) {
-			r = &rs->dev[i].rdev;
-			if (test_bit(Faulty, &r->flags) && r->sb_page &&
-			    sync_page_io(r, 0, r->sb_size,
-					 r->sb_page, READ, 1)) {
-				DMINFO("Faulty device #%d has readable super"
-				       "block. Attempting to revive it.", i);
-				r->raid_disk = i;
-				r->saved_raid_disk = i;
-				flags = r->flags;
-				clear_bit(Faulty, &r->flags);
-				clear_bit(WriteErrorSeen, &r->flags);
-				clear_bit(In_sync, &r->flags);
-				if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
-					r->raid_disk = -1;
-					r->saved_raid_disk = -1;
-					r->flags = flags;
-				} else {
-					r->recovery_offset = 0;
-					cleared_failed_devices |= 1 << i;
-				}
-			}
-		}
-		if (cleared_failed_devices) {
-			rdev_for_each(r, &rs->md) {
-				sb = page_address(r->sb_page);
-				failed_devices = le64_to_cpu(sb->failed_devices);
-				failed_devices &= ~cleared_failed_devices;
-				sb->failed_devices = cpu_to_le64(failed_devices);
-			}
-		}
+		attempt_restore_of_faulty_devices(rs);
 	}
 	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
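
For reference, a sketch of how raid_resume() reads once this patch is applied, reconstructed from the hunks above. The pieces that fall outside the quoted diff context (the rs->bitmap_loaded assignment, the opening of the else branch, and a trailing mddev_resume() call) are assumptions based on the surrounding dm-raid code, not part of this diff.

/*
 * Sketch only: reconstructed from the hunks above; lines marked
 * "assumed" are not shown in the quoted diff context.
 */
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;	/* assumed */
	} else {			/* assumed */
		/*
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);		/* assumed */
}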