Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--	drivers/md/dm-mpath.c	163
1 file changed, 90 insertions(+), 73 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e7ee59e655d5..9f7302d4878d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,6 @@
 
 #include "dm.h"
 #include "dm-path-selector.h"
-#include "dm-hw-handler.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
 #include "dm-uevent.h"
@@ -20,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/workqueue.h>
+#include <scsi/scsi_dh.h>
 #include <asm/atomic.h>
 
 #define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
 
 	spinlock_t lock;
 
-	struct hw_handler hw_handler;
+	const char *hw_handler_name;
+	struct work_struct activate_path;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		m->queue_io = 1;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
+		INIT_WORK(&m->activate_path, activate_path);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 static void free_multipath(struct multipath *m)
 {
 	struct priority_group *pg, *tmp;
-	struct hw_handler *hwh = &m->hw_handler;
 
 	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 		list_del(&pg->list);
 		free_priority_group(pg, m->ti);
 	}
 
-	if (hwh->type) {
-		hwh->type->destroy(hwh);
-		dm_put_hw_handler(hwh->type);
-	}
-
+	kfree(m->hw_handler_name);
 	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
-	struct hw_handler *hwh = &m->hw_handler;
-
 	m->current_pg = pgpath->pg;
 
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
-	if (hwh->type && hwh->type->pg_init) {
+	if (m->hw_handler_name) {
 		m->pg_init_required = 1;
 		m->queue_io = 1;
 	} else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct hw_handler *hwh = &m->hw_handler;
 	struct pgpath *pgpath = NULL;
 	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
@@ -439,7 +434,7 @@ out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (init_required)
-		hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path);
+		queue_work(kmpath_handlerd, &m->activate_path);
 
 	if (!must_queue)
 		dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
-	int r;
-	struct hw_handler_type *hwht;
 	unsigned hw_argc;
 	struct dm_target *ti = m->ti;
 
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 		{0, 1024, "invalid number of hardware handler args"},
 	};
 
-	r = read_param(_params, shift(as), &hw_argc, &ti->error);
-	if (r)
+	if (read_param(_params, shift(as), &hw_argc, &ti->error))
 		return -EINVAL;
 
 	if (!hw_argc)
 		return 0;
 
-	hwht = dm_get_hw_handler(shift(as));
-	if (!hwht) {
+	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+	request_module("scsi_dh_%s", m->hw_handler_name);
+	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 		ti->error = "unknown hardware handler type";
+		kfree(m->hw_handler_name);
+		m->hw_handler_name = NULL;
 		return -EINVAL;
 	}
-
-	m->hw_handler.md = dm_table_get_md(ti->table);
-	dm_put(m->hw_handler.md);
-
-	r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
-	if (r) {
-		dm_put_hw_handler(hwht);
-		ti->error = "hardware handler constructor failed";
-		return r;
-	}
-
-	m->hw_handler.type = hwht;
 	consume(as, hw_argc - 1);
 
 	return 0;
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 
+	flush_workqueue(kmpath_handlerd);
 	flush_workqueue(kmultipathd);
 	free_multipath(m);
 }
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 	return limit_reached;
 }
 
-/*
- * pg_init must call this when it has completed its initialisation
- */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
+static void pg_init_done(struct dm_path *path, int errors)
 {
 	struct pgpath *pgpath = path_to_pgpath(path);
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
 
-	/*
-	 * If requested, retry pg_init until maximum number of retries exceeded.
-	 * If retry not requested and PG already bypassed, always fail the path.
-	 */
-	if (err_flags & MP_RETRY) {
-		if (pg_init_limit_reached(m, pgpath))
-			err_flags |= MP_FAIL_PATH;
-	} else if (err_flags && pg->bypassed)
-		err_flags |= MP_FAIL_PATH;
-
-	if (err_flags & MP_FAIL_PATH)
+	/* device or driver problems */
+	switch (errors) {
+	case SCSI_DH_OK:
+		break;
+	case SCSI_DH_NOSYS:
+		if (!m->hw_handler_name) {
+			errors = 0;
+			break;
+		}
+		DMERR("Cannot failover device because scsi_dh_%s was not "
+		      "loaded.", m->hw_handler_name);
+		/*
+		 * Fail path for now, so we do not ping pong
+		 */
 		fail_path(pgpath);
-
-	if (err_flags & MP_BYPASS_PG)
+		break;
+	case SCSI_DH_DEV_TEMP_BUSY:
+		/*
+		 * Probably doing something like FW upgrade on the
+		 * controller so try the other pg.
+		 */
 		bypass_pg(m, pg, 1);
+		break;
+	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
+	case SCSI_DH_RETRY:
+	case SCSI_DH_IMM_RETRY:
+	case SCSI_DH_RES_TEMP_UNAVAIL:
+		if (pg_init_limit_reached(m, pgpath))
+			fail_path(pgpath);
+		errors = 0;
+		break;
+	default:
+		/*
+		 * We probably do not want to fail the path for a device
+		 * error, but this is what the old dm did. In future
+		 * patches we can do more advanced handling.
+		 */
+		fail_path(pgpath);
+	}
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (err_flags & ~MP_RETRY) {
+	if (errors) {
+		DMERR("Could not failover device. Error %d.", errors);
 		m->current_pgpath = NULL;
 		m->current_pg = NULL;
-	} else if (!m->pg_init_required)
+	} else if (!m->pg_init_required) {
 		m->queue_io = 0;
+		pg->bypassed = 0;
+	}
 
 	m->pg_init_in_progress = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+	int ret;
+	struct multipath *m =
+		container_of(work, struct multipath, activate_path);
+	struct dm_path *path = &m->current_pgpath->path;
+
+	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+	pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
 static int do_end_io(struct multipath *m, struct bio *bio,
 		     int error, struct dm_mpath_io *mpio)
 {
-	struct hw_handler *hwh = &m->hw_handler;
-	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
 	unsigned long flags;
 
 	if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	}
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (hwh->type && hwh->type->error)
-		err_flags = hwh->type->error(hwh, bio);
-
-	if (mpio->pgpath) {
-		if (err_flags & MP_FAIL_PATH)
-			fail_path(mpio->pgpath);
-
-		if (err_flags & MP_BYPASS_PG)
-			bypass_pg(m, mpio->pgpath->pg, 1);
-	}
-
-	if (err_flags & MP_ERROR_IO)
-		return -EIO;
+	if (mpio->pgpath)
+		fail_path(mpio->pgpath);
 
 requeue:
 	dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 	int sz = 0;
 	unsigned long flags;
 	struct multipath *m = (struct multipath *) ti->private;
-	struct hw_handler *hwh = &m->hw_handler;
 	struct priority_group *pg;
 	struct pgpath *p;
 	unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
 	}
 
-	if (hwh->type && hwh->type->status)
-		sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
-	else if (!hwh->type || type == STATUSTYPE_INFO)
+	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
 		DMEMIT("0 ");
 	else
-		DMEMIT("1 %s ", hwh->type->name);
+		DMEMIT("1 %s ", m->hw_handler_name);
 
 	DMEMIT("%u ", m->nr_priority_groups);
 
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
 		return -ENOMEM;
 	}
 
+	/*
+	 * A separate workqueue is used to handle the device handlers
+	 * to avoid overloading existing workqueue. Overloading the
+	 * old workqueue would also create a bottleneck in the
+	 * path of the storage hardware device activation.
+	 */
+	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+	if (!kmpath_handlerd) {
+		DMERR("failed to create workqueue kmpath_handlerd");
+		destroy_workqueue(kmultipathd);
+		dm_unregister_target(&multipath_target);
+		kmem_cache_destroy(_mpio_cache);
+		return -ENOMEM;
+	}
+
 	DMINFO("version %u.%u.%u loaded",
 	       multipath_target.version[0], multipath_target.version[1],
 	       multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
 {
 	int r;
 
+	destroy_workqueue(kmpath_handlerd);
 	destroy_workqueue(kmultipathd);
 
 	r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
 
 	kmem_cache_destroy(_mpio_cache);
 }
 
-EXPORT_SYMBOL_GPL(dm_pg_init_complete);
-
 module_init(dm_multipath_init);
 module_exit(dm_multipath_exit);
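
For readers tracing the new control flow, the sketch below is a minimal userspace analogue of what the patch introduces: pg_init work is handed off to a dedicated worker (kmpath_handlerd in the patch), the worker invokes the device handler's activate hook, and the result is funnelled through a single switch that picks between fail, bypass and retry. All names in the sketch (dh_status, activate, init_done) are hypothetical stand-ins; only the structure mirrors activate_path() and pg_init_done() above.

/*
 * Illustrative userspace sketch (not kernel code).  The dh_* values are
 * hypothetical stand-ins for the kernel's SCSI_DH_* status codes.
 */
#include <stdio.h>

enum dh_status { DH_OK, DH_NOSYS, DH_DEV_TEMP_BUSY, DH_RETRY, DH_FATAL };

/* Stand-in for scsi_dh_activate(): ask the handler to bring a path up. */
static enum dh_status activate(const char *path)
{
	printf("activating %s\n", path);
	return DH_OK;
}

/* Mirrors the shape of pg_init_done(): map the handler's verdict onto
 * path-level actions. */
static void init_done(const char *path, enum dh_status st)
{
	switch (st) {
	case DH_OK:
		printf("%s ready, releasing queued I/O\n", path);
		break;
	case DH_NOSYS:		/* handler module missing */
	case DH_FATAL:		/* unclassified error */
		printf("failing %s\n", path);
		break;
	case DH_DEV_TEMP_BUSY:	/* e.g. controller firmware upgrade */
		printf("bypassing the priority group of %s\n", path);
		break;
	case DH_RETRY:		/* transient: retry up to a limit */
		printf("retrying %s\n", path);
		break;
	}
}

int main(void)
{
	/* The kernel hops through queue_work(kmpath_handlerd, ...) here;
	 * this sketch simply calls the "worker" directly. */
	init_done("/dev/sda", activate("/dev/sda"));
	return 0;
}

The dedicated single-threaded queue keeps hardware activation off kmultipathd, so a slow controller cannot stall ordinary queued-I/O processing — the same reasoning as the comment added in dm_multipath_init().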