Diffstat (limited to 'drivers/scsi')
58 files changed, 2682 insertions, 1330 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 68103e508db7..88e061d13d0b 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -667,12 +667,30 @@ NCR_700_chip_setup(struct Scsi_Host *host) __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); if(hostdata->chip710) { - __u8 burst_disable = hostdata->burst_disable - ? BURST_DISABLE : 0; + __u8 burst_disable = 0; + __u8 burst_length = 0; + + switch (hostdata->burst_length) { + case 1: + burst_length = BURST_LENGTH_1; + break; + case 2: + burst_length = BURST_LENGTH_2; + break; + case 4: + burst_length = BURST_LENGTH_4; + break; + case 8: + burst_length = BURST_LENGTH_8; + break; + default: + burst_disable = BURST_DISABLE; + break; + } dcntl_extra = COMPAT_700_MODE; NCR_700_writeb(dcntl_extra, host, DCNTL_REG); - NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra, + NCR_700_writeb(burst_length | hostdata->dmode_extra, host, DMODE_710_REG); NCR_700_writeb(burst_disable | (hostdata->differential ? DIFF : 0), host, CTEST7_REG); diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h index f38822db4210..841e1bb27d57 100644 --- a/drivers/scsi/53c700.h +++ b/drivers/scsi/53c700.h @@ -203,7 +203,7 @@ struct NCR_700_Host_Parameters { __u32 force_le_on_be:1; #endif __u32 chip710:1; /* set if really a 710 not 700 */ - __u32 burst_disable:1; /* set to 1 to disable 710 bursting */ + __u32 burst_length:4; /* set to 0 to disable 710 bursting */ /* NOTHING BELOW HERE NEEDS ALTERING */ __u32 fast:1; /* if we can alter the SCSI bus clock diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 7869c34a4a3e..5bf3f07870ba 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -973,6 +973,15 @@ config SCSI_LASI700 many PA-RISC workstations & servers. If you do not know whether you have a Lasi chip, it is safe to say "Y" here. +config SCSI_SNI_53C710 + tristate "SNI RM SCSI support for 53c710" + depends on SNI_RM && SCSI + select SCSI_SPI_ATTRS + select 53C700_LE_ON_BE + help + This is a driver for the onboard SCSI controller found in older + SNI RM workstations & servers. 
+ config 53C700_LE_ON_BE bool depends on SCSI_LASI700 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index bd7c9888f7f4..79ecf4ebe6eb 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -124,6 +124,7 @@ obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o obj-$(CONFIG_SCSI_FCAL) += fcal.o obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o +obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o obj-$(CONFIG_SCSI_IPR) += ipr.o obj-$(CONFIG_SCSI_SRP) += libsrp.o diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c index 9859cd17fc57..f12864abed2f 100644 --- a/drivers/scsi/NCR_D700.c +++ b/drivers/scsi/NCR_D700.c @@ -200,6 +200,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, hostdata->base = ioport_map(region, 64); hostdata->differential = (((1<<siop) & differential) != 0); hostdata->clock = NCR_D700_CLOCK_MHZ; + hostdata->burst_length = 8; /* and register the siop */ host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev); diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile index 28d133a3094f..f1cca4ee5410 100644 --- a/drivers/scsi/aacraid/Makefile +++ b/drivers/scsi/aacraid/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_SCSI_AACRAID) := aacraid.o aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ - dpcsup.o rx.o sa.o rkt.o + dpcsup.o rx.o sa.o rkt.o nark.o EXTRA_CFLAGS := -Idrivers/scsi diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 426cd6f49f5d..ddb33b06e0ef 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -170,9 +170,9 @@ int acbsize = -1; module_param(acbsize, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); -int expose_physicals = 0; +int expose_physicals = -1; module_param(expose_physicals, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on"); +MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); /** * aac_get_config_status - check the adapter configuration * @common: adapter to query @@ -706,6 +706,309 @@ static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, } } +static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) +{ + if (lba & 0xffffffff00000000LL) { + int cid = scmd_id(cmd); + dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); + cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense((u8 *) &dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, + SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, + 0, 0); + memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer)) + ? 
sizeof(cmd->sense_buffer) + : sizeof(dev->fsa_dev[cid].sense_data)); + cmd->scsi_done(cmd); + return 1; + } + return 0; +} + +static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) +{ + return 0; +} + +static void io_callback(void *context, struct fib * fibptr); + +static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_raw_io *readcmd; + aac_fib_init(fib); + readcmd = (struct aac_raw_io *) fib_data(fib); + readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + readcmd->count = cpu_to_le32(count<<9); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->flags = cpu_to_le16(1); + readcmd->bpTotal = 0; + readcmd->bpComplete = 0; + + aac_build_sgraw(cmd, &readcmd->sg); + fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw)); + BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerRawIo, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_read64 *readcmd; + aac_fib_init(fib); + readcmd = (struct aac_read64 *) fib_data(fib); + readcmd->command = cpu_to_le32(VM_CtHostRead64); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->sector_count = cpu_to_le16(count); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->pad = 0; + readcmd->flags = 0; + + aac_build_sg64(cmd, &readcmd->sg); + fibsize = sizeof(struct aac_read64) + + ((le32_to_cpu(readcmd->sg.count) - 1) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand64, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_read *readcmd; + aac_fib_init(fib); + readcmd = (struct aac_read *) fib_data(fib); + readcmd->command = cpu_to_le32(VM_CtBlockRead); + readcmd->cid = cpu_to_le16(scmd_id(cmd)); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->count = cpu_to_le32(count * 512); + + aac_build_sg(cmd, &readcmd->sg); + fibsize = sizeof(struct aac_read) + + ((le32_to_cpu(readcmd->sg.count) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_raw_io *writecmd; + aac_fib_init(fib); + writecmd = (struct aac_raw_io *) fib_data(fib); + writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); + writecmd->count = cpu_to_le32(count<<9); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->flags = 0; + writecmd->bpTotal = 0; + writecmd->bpComplete = 0; + + aac_build_sgraw(cmd, &writecmd->sg); + fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw)); + BUG_ON(fibsize > (fib->dev->max_fib_size - 
sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerRawIo, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_write64 *writecmd; + aac_fib_init(fib); + writecmd = (struct aac_write64 *) fib_data(fib); + writecmd->command = cpu_to_le32(VM_CtHostWrite64); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->sector_count = cpu_to_le16(count); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->pad = 0; + writecmd->flags = 0; + + aac_build_sg64(cmd, &writecmd->sg); + fibsize = sizeof(struct aac_write64) + + ((le32_to_cpu(writecmd->sg.count) - 1) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand64, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) +{ + u16 fibsize; + struct aac_write *writecmd; + aac_fib_init(fib); + writecmd = (struct aac_write *) fib_data(fib); + writecmd->command = cpu_to_le32(VM_CtBlockWrite); + writecmd->cid = cpu_to_le16(scmd_id(cmd)); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->count = cpu_to_le32(count * 512); + writecmd->sg.count = cpu_to_le32(1); + /* ->stable is not used - it did mean which type of write */ + + aac_build_sg(cmd, &writecmd->sg); + fibsize = sizeof(struct aac_write) + + ((le32_to_cpu(writecmd->sg.count) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ContainerCommand, + fib, + fibsize, + FsaNormal, + 0, 1, + (fib_callback) io_callback, + (void *) cmd); +} + +static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd) +{ + struct aac_srb * srbcmd; + u32 flag; + u32 timeout; + + aac_fib_init(fib); + switch(cmd->sc_data_direction){ + case DMA_TO_DEVICE: + flag = SRB_DataOut; + break; + case DMA_BIDIRECTIONAL: + flag = SRB_DataIn | SRB_DataOut; + break; + case DMA_FROM_DEVICE: + flag = SRB_DataIn; + break; + case DMA_NONE: + default: /* shuts up some versions of gcc */ + flag = SRB_NoDataXfer; + break; + } + + srbcmd = (struct aac_srb*) fib_data(fib); + srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); + srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd))); + srbcmd->id = cpu_to_le32(scmd_id(cmd)); + srbcmd->lun = cpu_to_le32(cmd->device->lun); + srbcmd->flags = cpu_to_le32(flag); + timeout = cmd->timeout_per_command/HZ; + if (timeout == 0) + timeout = 1; + srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds + srbcmd->retry_limit = 0; /* Obsolete parameter */ + srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len); + return srbcmd; +} + +static void aac_srb_callback(void *context, struct fib * fibptr); + +static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) +{ + u16 fibsize; + struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); + + aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg); + srbcmd->count = cpu_to_le32(cmd->request_bufflen); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); + /* + * Build Scatter/Gather list + */ + fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + + 
((le32_to_cpu(srbcmd->sg.count) & 0xff) * + sizeof (struct sgentry64)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ScsiPortCommand64, fib, + fibsize, FsaNormal, 0, 1, + (fib_callback) aac_srb_callback, + (void *) cmd); +} + +static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd) +{ + u16 fibsize; + struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); + + aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg); + srbcmd->count = cpu_to_le32(cmd->request_bufflen); + + memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); + memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); + /* + * Build Scatter/Gather list + */ + fibsize = sizeof (struct aac_srb) + + (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * + sizeof (struct sgentry)); + BUG_ON (fibsize > (fib->dev->max_fib_size - + sizeof(struct aac_fibhdr))); + + /* + * Now send the Fib to the adapter + */ + return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1, + (fib_callback) aac_srb_callback, (void *) cmd); +} + int aac_get_adapter_info(struct aac_dev* dev) { struct fib* fibptr; @@ -874,14 +1177,27 @@ int aac_get_adapter_info(struct aac_dev* dev) } } /* - * 57 scatter gather elements + * Deal with configuring for the individualized limits of each packet + * interface. */ - if (!(dev->raw_io_interface)) { + dev->a_ops.adapter_scsi = (dev->dac_support) + ? aac_scsi_64 + : aac_scsi_32; + if (dev->raw_io_interface) { + dev->a_ops.adapter_bounds = (dev->raw_io_64) + ? aac_bounds_64 + : aac_bounds_32; + dev->a_ops.adapter_read = aac_read_raw_io; + dev->a_ops.adapter_write = aac_write_raw_io; + } else { + dev->a_ops.adapter_bounds = aac_bounds_32; dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); if (dev->dac_support) { + dev->a_ops.adapter_read = aac_read_block64; + dev->a_ops.adapter_write = aac_write_block64; /* * 38 scatter gather elements */ @@ -891,6 +1207,9 @@ int aac_get_adapter_info(struct aac_dev* dev) sizeof(struct aac_write64) + sizeof(struct sgentry64)) / sizeof(struct sgentry64); + } else { + dev->a_ops.adapter_read = aac_read_block; + dev->a_ops.adapter_write = aac_write_block; } dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { @@ -1004,8 +1323,6 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) u64 lba; u32 count; int status; - - u16 fibsize; struct aac_dev *dev; struct fib * cmd_fibcontext; @@ -1059,23 +1376,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) } dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); - if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) && - (lba & 0xffffffff00000000LL)) { - dprintk((KERN_DEBUG "aac_read: Illegal lba\n")); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | - SAM_STAT_CHECK_CONDITION; - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, - SENCODE_INTERNAL_TARGET_FAILURE, - ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, - 0, 0); - memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, - (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer)) - ? 
sizeof(scsicmd->sense_buffer) - : sizeof(dev->fsa_dev[cid].sense_data)); - scsicmd->scsi_done(scsicmd); + if (aac_adapter_bounds(dev,scsicmd,lba)) return 0; - } /* * Alocate and initialize a Fib */ @@ -1083,85 +1385,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) return -1; } - aac_fib_init(cmd_fibcontext); - - if (dev->raw_io_interface) { - struct aac_raw_io *readcmd; - readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); - readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd->count = cpu_to_le32(count<<9); - readcmd->cid = cpu_to_le16(cid); - readcmd->flags = cpu_to_le16(1); - readcmd->bpTotal = 0; - readcmd->bpComplete = 0; - - aac_build_sgraw(scsicmd, &readcmd->sg); - fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw)); - BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerRawIo, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } else if (dev->dac_support == 1) { - struct aac_read64 *readcmd; - readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext); - readcmd->command = cpu_to_le32(VM_CtHostRead64); - readcmd->cid = cpu_to_le16(cid); - readcmd->sector_count = cpu_to_le16(count); - readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->pad = 0; - readcmd->flags = 0; - - aac_build_sg64(scsicmd, &readcmd->sg); - fibsize = sizeof(struct aac_read64) + - ((le32_to_cpu(readcmd->sg.count) - 1) * - sizeof (struct sgentry64)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerCommand64, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } else { - struct aac_read *readcmd; - readcmd = (struct aac_read *) fib_data(cmd_fibcontext); - readcmd->command = cpu_to_le32(VM_CtBlockRead); - readcmd->cid = cpu_to_le32(cid); - readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->count = cpu_to_le32(count * 512); - - aac_build_sg(scsicmd, &readcmd->sg); - fibsize = sizeof(struct aac_read) + - ((le32_to_cpu(readcmd->sg.count) - 1) * - sizeof (struct sgentry)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerCommand, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } - - + status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); /* * Check that the command queued to the controller @@ -1187,7 +1411,6 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) u64 lba; u32 count; int status; - u16 fibsize; struct aac_dev *dev; struct fib * cmd_fibcontext; @@ -1227,22 +1450,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) } dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); - if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) - && (lba & 0xffffffff00000000LL)) { - dprintk((KERN_DEBUG "aac_write: Illegal lba\n")); - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, - SENCODE_INTERNAL_TARGET_FAILURE, - ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, - 0, 0); - memcpy(scsicmd->sense_buffer, 
&dev->fsa_dev[cid].sense_data, - (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer)) - ? sizeof(scsicmd->sense_buffer) - : sizeof(dev->fsa_dev[cid].sense_data)); - scsicmd->scsi_done(scsicmd); + if (aac_adapter_bounds(dev,scsicmd,lba)) return 0; - } /* * Allocate and initialize a Fib then setup a BlockWrite command */ @@ -1251,85 +1460,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) scsicmd->scsi_done(scsicmd); return 0; } - aac_fib_init(cmd_fibcontext); - if (dev->raw_io_interface) { - struct aac_raw_io *writecmd; - writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); - writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd->count = cpu_to_le32(count<<9); - writecmd->cid = cpu_to_le16(cid); - writecmd->flags = 0; - writecmd->bpTotal = 0; - writecmd->bpComplete = 0; - - aac_build_sgraw(scsicmd, &writecmd->sg); - fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw)); - BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerRawIo, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } else if (dev->dac_support == 1) { - struct aac_write64 *writecmd; - writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext); - writecmd->command = cpu_to_le32(VM_CtHostWrite64); - writecmd->cid = cpu_to_le16(cid); - writecmd->sector_count = cpu_to_le16(count); - writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->pad = 0; - writecmd->flags = 0; - - aac_build_sg64(scsicmd, &writecmd->sg); - fibsize = sizeof(struct aac_write64) + - ((le32_to_cpu(writecmd->sg.count) - 1) * - sizeof (struct sgentry64)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerCommand64, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } else { - struct aac_write *writecmd; - writecmd = (struct aac_write *) fib_data(cmd_fibcontext); - writecmd->command = cpu_to_le32(VM_CtBlockWrite); - writecmd->cid = cpu_to_le32(cid); - writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->count = cpu_to_le32(count * 512); - writecmd->sg.count = cpu_to_le32(1); - /* ->stable is not used - it did mean which type of write */ - - aac_build_sg(scsicmd, &writecmd->sg); - fibsize = sizeof(struct aac_write) + - ((le32_to_cpu(writecmd->sg.count) - 1) * - sizeof (struct sgentry)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ContainerCommand, - cmd_fibcontext, - fibsize, - FsaNormal, - 0, 1, - (fib_callback) io_callback, - (void *) scsicmd); - } + status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count); /* * Check that the command queued to the controller @@ -2099,10 +2231,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) struct fib* cmd_fibcontext; struct aac_dev* dev; int status; - struct aac_srb *srbcmd; - u16 fibsize; - u32 flag; - u32 timeout; dev = (struct aac_dev *)scsicmd->device->host->hostdata; if (scmd_id(scsicmd) >= dev->maximum_num_physicals || @@ -2112,88 +2240,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) return 0; } - switch(scsicmd->sc_data_direction){ - case DMA_TO_DEVICE: - flag = SRB_DataOut; - break; - case 
DMA_BIDIRECTIONAL: - flag = SRB_DataIn | SRB_DataOut; - break; - case DMA_FROM_DEVICE: - flag = SRB_DataIn; - break; - case DMA_NONE: - default: /* shuts up some versions of gcc */ - flag = SRB_NoDataXfer; - break; - } - - /* * Allocate and initialize a Fib then setup a BlockWrite command */ if (!(cmd_fibcontext = aac_fib_alloc(dev))) { return -1; } - aac_fib_init(cmd_fibcontext); - - srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); - srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); - srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd))); - srbcmd->id = cpu_to_le32(scmd_id(scsicmd)); - srbcmd->lun = cpu_to_le32(scsicmd->device->lun); - srbcmd->flags = cpu_to_le32(flag); - timeout = scsicmd->timeout_per_command/HZ; - if(timeout == 0){ - timeout = 1; - } - srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds - srbcmd->retry_limit = 0; /* Obsolete parameter */ - srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len); - - if( dev->dac_support == 1 ) { - aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg); - srbcmd->count = cpu_to_le32(scsicmd->request_bufflen); - - memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); - memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len); - /* - * Build Scatter/Gather list - */ - fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + - ((le32_to_cpu(srbcmd->sg.count) & 0xff) * - sizeof (struct sgentry64)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); + status = aac_adapter_scsi(cmd_fibcontext, scsicmd); - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext, - fibsize, FsaNormal, 0, 1, - (fib_callback) aac_srb_callback, - (void *) scsicmd); - } else { - aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg); - srbcmd->count = cpu_to_le32(scsicmd->request_bufflen); - - memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); - memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len); - /* - * Build Scatter/Gather list - */ - fibsize = sizeof (struct aac_srb) + - (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * - sizeof (struct sgentry)); - BUG_ON (fibsize > (dev->max_fib_size - - sizeof(struct aac_fibhdr))); - - /* - * Now send the Fib to the adapter - */ - status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, - (fib_callback) aac_srb_callback, (void *) scsicmd); - } /* * Check that the command queued to the controller */ diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 4f8b4c53d435..39ecd0d22eb0 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -5,6 +5,7 @@ #define _nblank(x) #x #define nblank(x) _nblank(x)[0] +#include <linux/interrupt.h> /*------------------------------------------------------------------------------ * D E F I N E S @@ -485,16 +486,28 @@ enum aac_log_level { struct aac_dev; struct fib; +struct scsi_cmnd; struct adapter_ops { + /* Low level operations */ void (*adapter_interrupt)(struct aac_dev *dev); void (*adapter_notify)(struct aac_dev *dev, u32 event); void (*adapter_disable_int)(struct aac_dev *dev); + void (*adapter_enable_int)(struct aac_dev *dev); int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); int (*adapter_check_health)(struct aac_dev *dev); - int (*adapter_send)(struct fib * fib); + /* Transport operations */ int (*adapter_ioremap)(struct aac_dev * dev, u32 size); + irqreturn_t (*adapter_intr)(int irq, void *dev_id); + /* Packet operations */ + int 
(*adapter_deliver)(struct fib * fib); + int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba); + int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); + int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); + int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); + /* Administrative operations */ + int (*adapter_comm)(struct aac_dev * dev, int comm); }; /* @@ -1018,7 +1031,9 @@ struct aac_dev u8 nondasd_support; u8 dac_support; u8 raid_scsi_mode; - u8 new_comm_interface; + u8 comm_interface; +# define AAC_COMM_PRODUCER 0 +# define AAC_COMM_MESSAGE 1 /* macro side-effects BEWARE */ # define raw_io_interface \ init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) @@ -1036,18 +1051,36 @@ struct aac_dev #define aac_adapter_disable_int(dev) \ (dev)->a_ops.adapter_disable_int(dev) +#define aac_adapter_enable_int(dev) \ + (dev)->a_ops.adapter_enable_int(dev) + #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) #define aac_adapter_check_health(dev) \ (dev)->a_ops.adapter_check_health(dev) -#define aac_adapter_send(fib) \ - ((fib)->dev)->a_ops.adapter_send(fib) - #define aac_adapter_ioremap(dev, size) \ (dev)->a_ops.adapter_ioremap(dev, size) +#define aac_adapter_deliver(fib) \ + ((fib)->dev)->a_ops.adapter_deliver(fib) + +#define aac_adapter_bounds(dev,cmd,lba) \ + dev->a_ops.adapter_bounds(dev,cmd,lba) + +#define aac_adapter_read(fib,cmd,lba,count) \ + ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count) + +#define aac_adapter_write(fib,cmd,lba,count) \ + ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count) + +#define aac_adapter_scsi(fib,cmd) \ + ((fib)->dev)->a_ops.adapter_scsi(fib,cmd) + +#define aac_adapter_comm(dev,comm) \ + (dev)->a_ops.adapter_comm(dev, comm) + #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) /* @@ -1767,7 +1800,6 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor) return (u32)capacity; } -struct scsi_cmnd; /* SCp.phase values */ #define AAC_OWNER_MIDLEVEL 0x101 #define AAC_OWNER_LOWLEVEL 0x102 @@ -1794,7 +1826,9 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); int aac_rx_init(struct aac_dev *dev); int aac_rkt_init(struct aac_dev *dev); +int aac_nark_init(struct aac_dev *dev); int aac_sa_init(struct aac_dev *dev); +int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); unsigned int aac_response_normal(struct aac_queue * q); unsigned int aac_command_normal(struct aac_queue * q); unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 6d305b2f854e..df67ba686023 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -95,7 +95,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); init->InitFlags = 0; - if (dev->new_comm_interface) { + if (dev->comm_interface == AAC_COMM_MESSAGE) { init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); } @@ -297,21 +297,23 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + 
sizeof(struct sgentry)) / sizeof(struct sgentry); - dev->new_comm_interface = 0; + dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_64 = 0; if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && (status[0] == 0x00000001)) { if (status[1] & AAC_OPT_NEW_COMM_64) dev->raw_io_64 = 1; - if (status[1] & AAC_OPT_NEW_COMM) - dev->new_comm_interface = dev->a_ops.adapter_send != 0; - if (dev->new_comm_interface && (status[2] > dev->base_size)) { + if (dev->a_ops.adapter_comm && + (status[1] & AAC_OPT_NEW_COMM)) + dev->comm_interface = AAC_COMM_MESSAGE; + if ((dev->comm_interface == AAC_COMM_MESSAGE) && + (status[2] > dev->base_size)) { aac_adapter_ioremap(dev, 0); dev->base_size = status[2]; if (aac_adapter_ioremap(dev, status[2])) { /* remap failed, go back ... */ - dev->new_comm_interface = 0; + dev->comm_interface = AAC_COMM_PRODUCER; if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { printk(KERN_WARNING "aacraid: unable to map adapter.\n"); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 4893a6d06a33..1b97f60652ba 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -317,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr * success. */ -static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) +int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) { struct aac_entry * entry = NULL; int map = 0; @@ -387,7 +387,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, { struct aac_dev * dev = fibptr->dev; struct hw_fib * hw_fib = fibptr->hw_fib; - struct aac_queue * q; unsigned long flags = 0; unsigned long qflags; @@ -469,38 +468,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, if (!dev->queues) return -EBUSY; - q = &dev->queues->queue[AdapNormCmdQueue]; if(wait) spin_lock_irqsave(&fibptr->event_lock, flags); - spin_lock_irqsave(q->lock, qflags); - if (dev->new_comm_interface) { - unsigned long count = 10000000L; /* 50 seconds */ - q->numpending++; - spin_unlock_irqrestore(q->lock, qflags); - while (aac_adapter_send(fibptr) != 0) { - if (--count == 0) { - if (wait) - spin_unlock_irqrestore(&fibptr->event_lock, flags); - spin_lock_irqsave(q->lock, qflags); - q->numpending--; - spin_unlock_irqrestore(q->lock, qflags); - return -ETIMEDOUT; - } - udelay(5); - } - } else { - u32 index; - unsigned long nointr = 0; - aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr); - - q->numpending++; - *(q->headers.producer) = cpu_to_le32(index + 1); - spin_unlock_irqrestore(q->lock, qflags); - dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index)); - if (!(nointr & aac_config.irq_mod)) - aac_adapter_notify(dev, AdapNormCmdQueue); - } + aac_adapter_deliver(fibptr); /* * If the caller wanted us to wait for response wait now. 
@@ -520,6 +491,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, while (down_trylock(&fibptr->event_wait)) { int blink; if (--count == 0) { + struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; spin_lock_irqsave(q->lock, qflags); q->numpending--; spin_unlock_irqrestore(q->lock, qflags); @@ -659,7 +631,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) unsigned long qflags; if (hw_fib->header.XferState == 0) { - if (dev->new_comm_interface) + if (dev->comm_interface == AAC_COMM_MESSAGE) kfree (hw_fib); return 0; } @@ -667,7 +639,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) * If we plan to do anything check the structure type first. */ if ( hw_fib->header.StructType != FIB_MAGIC ) { - if (dev->new_comm_interface) + if (dev->comm_interface == AAC_COMM_MESSAGE) kfree (hw_fib); return -EINVAL; } @@ -679,7 +651,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) * send the completed cdb to the adapter. */ if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { - if (dev->new_comm_interface) { + if (dev->comm_interface == AAC_COMM_MESSAGE) { kfree (hw_fib); } else { u32 index; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index d2cf875af59b..a9734e08fe28 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -157,6 +157,7 @@ static struct pci_device_id aac_pci_tbl[] = { { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */ { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ + { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ { 0,} }; MODULE_DEVICE_TABLE(pci, aac_pci_tbl); @@ -230,7 +231,8 @@ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */ { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */ - { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */ + { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ + { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ }; /** @@ -396,11 +398,15 @@ static int aac_slave_configure(struct scsi_device *sdev) sdev->skip_ms_page_3f = 1; } if ((sdev->type == TYPE_DISK) && - !expose_physicals && (sdev_channel(sdev) != CONTAINER_CHANNEL)) { - struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; - if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) - sdev->no_uld_attach = 1; + if (expose_physicals == 0) + return -ENXIO; + if (expose_physicals < 0) { + struct aac_dev *aac = + (struct aac_dev *)sdev->host->hostdata; + if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) + sdev->no_uld_attach = 1; + } } if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && (sdev_channel(sdev) == CONTAINER_CHANNEL)) { @@ -804,7 +810,6 @@ static struct scsi_host_template aac_driver_template = { .emulated = 1, }; - static int __devinit aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c new file mode 100644 index 000000000000..c76b611b6afb --- /dev/null +++ b/drivers/scsi/aacraid/nark.c @@ -0,0 +1,87 @@ 
+/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Module Name: + * nark.c + * + * Abstract: Hardware Device Interface for NEMER/ARK + * + */ + +#include <linux/pci.h> +#include <linux/blkdev.h> + +#include <scsi/scsi_host.h> + +#include "aacraid.h" + +/** + * aac_nark_ioremap + * @size: mapping resize request + * + */ +static int aac_nark_ioremap(struct aac_dev * dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.rx); + dev->regs.rx = NULL; + iounmap(dev->base); + dev->base = NULL; + return 0; + } + dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2); + dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) | + ((u64)pci_resource_start(dev->pdev, 1) << 32), + sizeof(struct rx_registers) - sizeof(struct rx_inbound)); + dev->base = NULL; + if (dev->regs.rx == NULL) + return -1; + dev->base = ioremap(dev->scsi_host_ptr->base, size); + if (dev->base == NULL) { + iounmap(dev->regs.rx); + dev->regs.rx = NULL; + return -1; + } + dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs; + return 0; +} + +/** + * aac_nark_init - initialize an NEMER/ARK Split Bar card + * @dev: device to configure + * + */ + +int aac_nark_init(struct aac_dev * dev) +{ + extern int _aac_rx_init(struct aac_dev *dev); + extern int aac_rx_select_comm(struct aac_dev *dev, int comm); + + /* + * Fill in the function dispatch table. + */ + dev->a_ops.adapter_ioremap = aac_nark_ioremap; + dev->a_ops.adapter_comm = aac_rx_select_comm; + + return _aac_rx_init(dev); +} diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 643f23b5ded8..d953c3fe998a 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -34,6 +34,40 @@ #include "aacraid.h" +#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB) + +/** + * aac_rkt_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ + +static int aac_rkt_select_comm(struct aac_dev *dev, int comm) +{ + int retval; + extern int aac_rx_select_comm(struct aac_dev *dev, int comm); + retval = aac_rx_select_comm(dev, comm); + if (comm == AAC_COMM_MESSAGE) { + /* + * FIB Setup has already been done, but we can minimize the + * damage by at least ensuring the OS never issues more + * commands than we can handle. The Rocket adapters currently + * can only handle 246 commands and 8 AIFs at the same time, + * and in fact do notify us accordingly if we negotiate the + * FIB size. The problem that causes us to add this check is + * to ensure that we do not overdo it with the adapter when a + * hard coded FIB override is being utilized. 
This special + * case warrants this half baked, but convenient, check here. + */ + if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) { + dev->init->MaxIoCommands = + cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB); + dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT; + } + } + return retval; +} + /** * aac_rkt_ioremap * @size: mapping resize request @@ -63,39 +97,13 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size) int aac_rkt_init(struct aac_dev *dev) { - int retval; extern int _aac_rx_init(struct aac_dev *dev); - extern void aac_rx_start_adapter(struct aac_dev *dev); /* * Fill in the function dispatch table. */ dev->a_ops.adapter_ioremap = aac_rkt_ioremap; + dev->a_ops.adapter_comm = aac_rkt_select_comm; - retval = _aac_rx_init(dev); - if (retval) - return retval; - if (dev->new_comm_interface) { - /* - * FIB Setup has already been done, but we can minimize the - * damage by at least ensuring the OS never issues more - * commands than we can handle. The Rocket adapters currently - * can only handle 246 commands and 8 AIFs at the same time, - * and in fact do notify us accordingly if we negotiate the - * FIB size. The problem that causes us to add this check is - * to ensure that we do not overdo it with the adapter when a - * hard coded FIB override is being utilized. This special - * case warrants this half baked, but convenient, check here. - */ - if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) { - dev->init->MaxIoCommands = cpu_to_le32(246); - dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB; - } - } - /* - * Tell the adapter that all is configured, and it can start - * accepting requests - */ - aac_rx_start_adapter(dev); - return 0; + return _aac_rx_init(dev); } diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index dcc8b0ea7a9d..c632d9354a26 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -46,60 +46,60 @@ #include "aacraid.h" -static irqreturn_t aac_rx_intr(int irq, void *dev_id) +static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) { struct aac_dev *dev = dev_id; + unsigned long bellbits; + u8 intstat = rx_readb(dev, MUnit.OISR); - dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id)); - if (dev->new_comm_interface) { - u32 Index = rx_readl(dev, MUnit.OutboundQueue); - if (Index == 0xFFFFFFFFL) - Index = rx_readl(dev, MUnit.OutboundQueue); - if (Index != 0xFFFFFFFFL) { - do { - if (aac_intr_normal(dev, Index)) { - rx_writel(dev, MUnit.OutboundQueue, Index); - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); - } - Index = rx_readl(dev, MUnit.OutboundQueue); - } while (Index != 0xFFFFFFFFL); - return IRQ_HANDLED; + /* + * Read mask and invert because drawbridge is reversed. + * This allows us to only service interrupts that have + * been enabled. + * Check to see if this is our interrupt. If it isn't just return + */ + if (intstat & ~(dev->OIMR)) { + bellbits = rx_readl(dev, OutboundDoorbellReg); + if (bellbits & DoorBellPrintfReady) { + aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); + rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); + rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); } - } else { - unsigned long bellbits; - u8 intstat; - intstat = rx_readb(dev, MUnit.OISR); - /* - * Read mask and invert because drawbridge is reversed. - * This allows us to only service interrupts that have - * been enabled. - * Check to see if this is our interrupt. 
If it isn't just return - */ - if (intstat & ~(dev->OIMR)) - { - bellbits = rx_readl(dev, OutboundDoorbellReg); - if (bellbits & DoorBellPrintfReady) { - aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); - rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); - rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); - } - else if (bellbits & DoorBellAdapterNormCmdReady) { - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); - aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); - } - else if (bellbits & DoorBellAdapterNormRespReady) { - rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); - aac_response_normal(&dev->queues->queue[HostNormRespQueue]); - } - else if (bellbits & DoorBellAdapterNormCmdNotFull) { - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); - } - else if (bellbits & DoorBellAdapterNormRespNotFull) { - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); - } - return IRQ_HANDLED; + else if (bellbits & DoorBellAdapterNormCmdReady) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); + aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); + } + else if (bellbits & DoorBellAdapterNormRespReady) { + rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); + aac_response_normal(&dev->queues->queue[HostNormRespQueue]); + } + else if (bellbits & DoorBellAdapterNormCmdNotFull) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); } + else if (bellbits & DoorBellAdapterNormRespNotFull) { + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); + } + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) +{ + struct aac_dev *dev = dev_id; + u32 Index = rx_readl(dev, MUnit.OutboundQueue); + if (Index == 0xFFFFFFFFL) + Index = rx_readl(dev, MUnit.OutboundQueue); + if (Index != 0xFFFFFFFFL) { + do { + if (aac_intr_normal(dev, Index)) { + rx_writel(dev, MUnit.OutboundQueue, Index); + rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); + } + Index = rx_readl(dev, MUnit.OutboundQueue); + } while (Index != 0xFFFFFFFFL); + return IRQ_HANDLED; } return IRQ_NONE; } @@ -115,6 +115,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev) } /** + * aac_rx_enable_interrupt_producer - Enable interrupts + * @dev: Adapter + */ + +static void aac_rx_enable_interrupt_producer(struct aac_dev *dev) +{ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); +} + +/** + * aac_rx_enable_interrupt_message - Enable interrupts + * @dev: Adapter + */ + +static void aac_rx_enable_interrupt_message(struct aac_dev *dev) +{ + rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); +} + +/** * rx_sync_cmd - send a command and wait * @dev: Adapter * @command: Command to execute @@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command, /* * Restore interrupt mask even though we timed out */ - if (dev->new_comm_interface) - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); - else - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); + aac_adapter_enable_int(dev); return -ETIMEDOUT; } /* @@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command, /* * Restore interrupt mask */ - if (dev->new_comm_interface) - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); - else - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); + aac_adapter_enable_int(dev); return 0; } @@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev) } /** - * aac_rx_send + * aac_rx_deliver_producer * 
@fib: fib to issue * * Will send a fib, returning 0 if successful. */ -static int aac_rx_send(struct fib * fib) +static int aac_rx_deliver_producer(struct fib * fib) { - u64 addr = fib->hw_fib_pa; struct aac_dev *dev = fib->dev; - volatile void __iomem *device = dev->regs.rx; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + unsigned long qflags; u32 Index; + unsigned long nointr = 0; - dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr)); - Index = rx_readl(dev, MUnit.InboundQueue); - if (Index == 0xFFFFFFFFL) + spin_lock_irqsave(q->lock, qflags); + aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr); + + q->numpending++; + *(q->headers.producer) = cpu_to_le32(Index + 1); + spin_unlock_irqrestore(q->lock, qflags); + if (!(nointr & aac_config.irq_mod)) + aac_adapter_notify(dev, AdapNormCmdQueue); + + return 0; +} + +/** + * aac_rx_deliver_message + * @fib: fib to issue + * + * Will send a fib, returning 0 if successful. + */ +static int aac_rx_deliver_message(struct fib * fib) +{ + struct aac_dev *dev = fib->dev; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + unsigned long qflags; + u32 Index; + u64 addr; + volatile void __iomem *device; + + unsigned long count = 10000000L; /* 50 seconds */ + spin_lock_irqsave(q->lock, qflags); + q->numpending++; + spin_unlock_irqrestore(q->lock, qflags); + for(;;) { Index = rx_readl(dev, MUnit.InboundQueue); - dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); - if (Index == 0xFFFFFFFFL) - return Index; + if (Index == 0xFFFFFFFFL) + Index = rx_readl(dev, MUnit.InboundQueue); + if (Index != 0xFFFFFFFFL) + break; + if (--count == 0) { + spin_lock_irqsave(q->lock, qflags); + q->numpending--; + spin_unlock_irqrestore(q->lock, qflags); + return -ETIMEDOUT; + } + udelay(5); + } device = dev->base + Index; - dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), - (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size))); + addr = fib->hw_fib_pa; writel((u32)(addr & 0xffffffff), device); device += sizeof(u32); writel((u32)(addr >> 32), device); device += sizeof(u32); writel(le16_to_cpu(fib->hw_fib->header.Size), device); rx_writel(dev, MUnit.InboundQueue, Index); - dprintk((KERN_DEBUG "aac_rx_send - return 0\n")); return 0; } @@ -430,6 +481,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev) } /** + * aac_rx_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ + +int aac_rx_select_comm(struct aac_dev *dev, int comm) +{ + switch (comm) { + case AAC_COMM_PRODUCER: + dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer; + dev->a_ops.adapter_intr = aac_rx_intr_producer; + dev->a_ops.adapter_deliver = aac_rx_deliver_producer; + break; + case AAC_COMM_MESSAGE: + dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message; + dev->a_ops.adapter_intr = aac_rx_intr_message; + dev->a_ops.adapter_deliver = aac_rx_deliver_message; + break; + default: + return 1; + } + return 0; +} + +/** * aac_rx_init - initialize an i960 based AAC card * @dev: device to configure * @@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev) } msleep(1); } - if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0) - { - printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); - goto error_iounmap; - } /* - * Fill in the function dispatch table. + * Fill in the common function dispatch table. 
*/ dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; dev->a_ops.adapter_notify = aac_rx_notify_adapter; dev->a_ops.adapter_sync_cmd = rx_sync_cmd; dev->a_ops.adapter_check_health = aac_rx_check_health; - dev->a_ops.adapter_send = aac_rx_send; /* * First clear out all interrupts. Then enable the one's that we * can handle. */ - rx_writeb(dev, MUnit.OIMR, 0xff); + aac_adapter_comm(dev, AAC_COMM_PRODUCER); + aac_adapter_disable_int(dev); rx_writel(dev, MUnit.ODR, 0xffffffff); - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); + aac_adapter_enable_int(dev); if (aac_init_adapter(dev) == NULL) - goto error_irq; - if (dev->new_comm_interface) - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); + goto error_iounmap; + aac_adapter_comm(dev, dev->comm_interface); + if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, + IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + aac_adapter_enable_int(dev); + /* + * Tell the adapter that all is configured, and it can + * start accepting requests + */ + aac_rx_start_adapter(dev); return 0; -error_irq: - rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); - free_irq(dev->scsi_host_ptr->irq, (void *)dev); - error_iounmap: return -1; @@ -530,20 +608,11 @@ error_iounmap: int aac_rx_init(struct aac_dev *dev) { - int retval; - /* * Fill in the function dispatch table. */ dev->a_ops.adapter_ioremap = aac_rx_ioremap; + dev->a_ops.adapter_comm = aac_rx_select_comm; - retval = _aac_rx_init(dev); - if (!retval) { - /* - * Tell the adapter that all is configured, and it can - * start accepting requests - */ - aac_rx_start_adapter(dev); - } - return retval; + return _aac_rx_init(dev); } diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 511b0a938fb1..8535db068c2f 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -92,6 +92,17 @@ static void aac_sa_disable_interrupt (struct aac_dev *dev) } /** + * aac_sa_enable_interrupt - enable interrupt + * @dev: Which adapter to enable. + */ + +static void aac_sa_enable_interrupt (struct aac_dev *dev) +{ + sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | + DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); +} + +/** * aac_sa_notify_adapter - handle adapter notification * @dev: Adapter that notification is for * @event: Event to notidy @@ -347,32 +358,36 @@ int aac_sa_init(struct aac_dev *dev) msleep(1); } - if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) { - printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance); - goto error_iounmap; - } - /* * Fill in the function dispatch table. */ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt; dev->a_ops.adapter_notify = aac_sa_notify_adapter; dev->a_ops.adapter_sync_cmd = sa_sync_cmd; dev->a_ops.adapter_check_health = aac_sa_check_health; + dev->a_ops.adapter_intr = aac_sa_intr; dev->a_ops.adapter_ioremap = aac_sa_ioremap; /* * First clear out all interrupts. Then enable the one's that * we can handle. 
*/ - sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); - sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | - DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); + aac_adapter_disable_int(dev); + aac_adapter_enable_int(dev); if(aac_init_adapter(dev) == NULL) goto error_irq; + if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, + IRQF_SHARED|IRQF_DISABLED, + "aacraid", (void *)dev ) < 0) { + printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + aac_adapter_enable_int(dev); /* * Tell the adapter that all is configure, and it can start @@ -382,7 +397,7 @@ int aac_sa_init(struct aac_dev *dev) return 0; error_irq: - sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); + aac_sa_disable_interrupt(dev); free_irq(dev->scsi_host_ptr->irq, (void *)dev); error_iounmap: diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 6f8901b748f7..c520e5b41fb5 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -37,18 +37,14 @@ static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) { - unsigned long flags; int ddb, i; - spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); ddb = FIND_FREE_DDB(asd_ha); if (ddb >= asd_ha->hw_prof.max_ddbs) { ddb = -ENOMEM; - spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); goto out; } SET_DDB(ddb, asd_ha); - spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) asd_ddbsite_write_dword(asd_ha, ddb, i, 0); @@ -77,14 +73,10 @@ out: static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) { - unsigned long flags; - if (!ddb || ddb >= 0xFFFF) return; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); - spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); CLEAR_DDB(ddb, asd_ha); - spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); } static inline void asd_set_ddb_type(struct domain_device *dev) @@ -320,8 +312,11 @@ out: int asd_dev_found(struct domain_device *dev) { + unsigned long flags; int res = 0; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { case SATA_PM: res = asd_init_sata_pm_ddb(dev); @@ -335,14 +330,18 @@ int asd_dev_found(struct domain_device *dev) else res = asd_init_initiator_ddb(dev); } + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + return res; } void asd_dev_gone(struct domain_device *dev) { int ddb, sister_ddb; + unsigned long flags; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); ddb = (int) (unsigned long) dev->lldd_dev; sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); @@ -350,4 +349,5 @@ void asd_dev_gone(struct domain_device *dev) asd_free_ddb(asd_ha, sister_ddb); asd_free_ddb(asd_ha, ddb); dev->lldd_dev = NULL; + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); } diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index fbc82b00a418..3aa568fbdbf7 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -38,7 +38,7 @@ #include "aic94xx_seq.h" /* The format is "version.release.patchlevel" */ -#define ASD_DRIVER_VERSION "1.0.2" +#define ASD_DRIVER_VERSION "1.0.3" static int use_msi = 0; module_param_named(use_msi, use_msi, int, S_IRUGO); @@ -57,6 +57,8 @@ MODULE_PARM_DESC(collector, "\n" char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; static struct 
scsi_transport_template *aic94xx_transport_template; +static int asd_scan_finished(struct Scsi_Host *, unsigned long); +static void asd_scan_start(struct Scsi_Host *); static struct scsi_host_template aic94xx_sht = { .module = THIS_MODULE, @@ -66,6 +68,8 @@ static struct scsi_host_template aic94xx_sht = { .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .slave_destroy = sas_slave_destroy, + .scan_finished = asd_scan_finished, + .scan_start = asd_scan_start, .change_queue_depth = sas_change_queue_depth, .change_queue_type = sas_change_queue_type, .bios_param = sas_bios_param, @@ -75,6 +79,7 @@ static struct scsi_host_template aic94xx_sht = { .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, }; static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) @@ -234,7 +239,7 @@ static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha) } /* Provide some sane default values. */ asd_ha->hw_prof.max_scbs = 512; - asd_ha->hw_prof.max_ddbs = 128; + asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS; asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; /* All phys are enabled, by default. */ asd_ha->hw_prof.enabled_phys = 0xFF; @@ -526,6 +531,7 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; + asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; return sas_register_ha(&asd_ha->sas_ha); } @@ -671,21 +677,10 @@ static int __devinit asd_pci_probe(struct pci_dev *dev, if (err) goto Err_reg_sas; - err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); - if (err) { - asd_printk("coudln't enable phys, err:%d\n", err); - goto Err_en_phys; - } - ASD_DPRINTK("enabled phys\n"); - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all it takes) */ - ssleep(1); - /* Wait for discovery to finish */ - scsi_flush_work(asd_ha->sas_ha.core.shost); + scsi_scan_host(shost); return 0; -Err_en_phys: - asd_unregister_sas_ha(asd_ha); + Err_reg_sas: asd_remove_dev_attrs(asd_ha); Err_dev_attrs: @@ -778,6 +773,28 @@ static void __devexit asd_pci_remove(struct pci_dev *dev) return; } +static void asd_scan_start(struct Scsi_Host *shost) +{ + struct asd_ha_struct *asd_ha; + int err; + + asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha; + err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); + if (err) + asd_printk("Couldn't enable phys, err:%d\n", err); +} + +static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; +} + static ssize_t asd_version_show(struct device_driver *driver, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); @@ -885,6 +902,7 @@ static void __exit aic94xx_exit(void) asd_remove_driver_attrs(&aic94xx_pci_driver.driver); pci_unregister_driver(&aic94xx_pci_driver); sas_release_transport(aic94xx_transport_template); + asd_release_firmware(); asd_destroy_global_caches(); asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, ASD_DRIVER_VERSION); diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h index 9050e93bfd5e..fa7c5290257d 100644 --- a/drivers/scsi/aic94xx/aic94xx_sas.h +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -34,6 +34,7 @@ * domain that 
this sequencer can maintain low-level connections for * us. They are be 64 bytes. */ +#define ASD_MAX_DDBS 128 struct asd_ddb_ssp_smp_target_port { u8 conn_type; /* byte 0 */ diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index 75ed6b0569d1..8f43ff772f23 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -413,40 +413,6 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) } } -/* hard reset a phy later */ -static void do_phy_reset_later(struct work_struct *work) -{ - struct sas_phy *sas_phy = - container_of(work, struct sas_phy, reset_work); - int error; - - ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, - sas_phy->identify.phy_identifier); - /* Reset device port */ - error = sas_phy_reset(sas_phy, 1); - if (error) - ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n", - __FUNCTION__, sas_phy->identify.phy_identifier, error); -} - -static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) -{ - INIT_WORK(&sas_phy->reset_work, do_phy_reset_later); - queue_work(shost->work_q, &sas_phy->reset_work); -} - -/* start up the ABORT TASK tmf... */ -static void task_kill_later(struct asd_ascb *ascb) -{ - struct asd_ha_struct *asd_ha = ascb->ha; - struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; - struct Scsi_Host *shost = sas_ha->core.shost; - struct sas_task *task = ascb->uldd_task; - - INIT_WORK(&task->abort_work, sas_task_abort); - queue_work(shost->work_q, &task->abort_work); -} - static void escb_tasklet_complete(struct asd_ascb *ascb, struct done_list_struct *dl) { @@ -479,26 +445,55 @@ static void escb_tasklet_complete(struct asd_ascb *ascb, case REQ_TASK_ABORT: { struct asd_ascb *a, *b; u16 tc_abort; + struct domain_device *failed_dev = NULL; + + ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", + __FUNCTION__, dl->status_block[3]); + /* + * Find the task that caused the abort and abort it first. + * The sequencer won't put anything on the done list until + * that happens. + */ tc_abort = *((u16*)(&dl->status_block[1])); tc_abort = le16_to_cpu(tc_abort); - ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", - __FUNCTION__, dl->status_block[3]); + list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { + struct sas_task *task = ascb->uldd_task; - /* Find the pending task and abort it. */ - list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) - if (a->tc_index == tc_abort) { - task_kill_later(a); + if (task && a->tc_index == tc_abort) { + failed_dev = task->dev; + sas_task_abort(task); break; } + } + + if (!failed_dev) { + ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n", + __FUNCTION__, tc_abort); + goto out; + } + + /* + * Now abort everything else for that device (hba?) so + * that the EH will wake up and do something. 
+ */ + list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { + struct sas_task *task = ascb->uldd_task; + + if (task && + task->dev == failed_dev && + a->tc_index != tc_abort) + sas_task_abort(task); + } + goto out; } case REQ_DEVICE_RESET: { - struct Scsi_Host *shost = sas_ha->core.shost; - struct sas_phy *dev_phy; struct asd_ascb *a; u16 conn_handle; + unsigned long flags; + struct sas_task *last_dev_task = NULL; conn_handle = *((u16*)(&dl->status_block[1])); conn_handle = le16_to_cpu(conn_handle); @@ -506,32 +501,47 @@ static void escb_tasklet_complete(struct asd_ascb *ascb, ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__, dl->status_block[3]); - /* Kill all pending tasks and reset the device */ - dev_phy = NULL; + /* Find the last pending task for the device... */ list_for_each_entry(a, &asd_ha->seq.pend_q, list) { - struct sas_task *task; - struct domain_device *dev; u16 x; + struct domain_device *dev; + struct sas_task *task = a->uldd_task; - task = a->uldd_task; if (!task) continue; dev = task->dev; x = (unsigned long)dev->lldd_dev; - if (x == conn_handle) { - dev_phy = dev->port->phy; - task_kill_later(a); - } + if (x == conn_handle) + last_dev_task = task; } - /* Reset device port */ - if (!dev_phy) { - ASD_DPRINTK("%s: No pending commands; can't reset.\n", - __FUNCTION__); + if (!last_dev_task) { + ASD_DPRINTK("%s: Device reset for idle device %d?\n", + __FUNCTION__, conn_handle); goto out; } - phy_reset_later(dev_phy, shost); + + /* ...and set the reset flag */ + spin_lock_irqsave(&last_dev_task->task_state_lock, flags); + last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags); + + /* Kill all pending tasks for the device */ + list_for_each_entry(a, &asd_ha->seq.pend_q, list) { + u16 x; + struct domain_device *dev; + struct sas_task *task = a->uldd_task; + + if (!task) + continue; + dev = task->dev; + + x = (unsigned long)dev->lldd_dev; + if (x == conn_handle) + sas_task_abort(task); + } + goto out; } case SIGNAL_NCQ_ERROR: diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c index e5a0ec37e954..5b0932f61473 100644 --- a/drivers/scsi/aic94xx/aic94xx_sds.c +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -427,7 +427,7 @@ struct asd_manuf_sec { struct asd_manuf_phy_desc { u8 state; /* low 4 bits */ -#define MS_PHY_STATE_ENABLEABLE 0 +#define MS_PHY_STATE_ENABLED 0 #define MS_PHY_STATE_REPORTED 1 #define MS_PHY_STATE_HIDDEN 2 u8 phy_id; @@ -756,11 +756,11 @@ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) * * HIDDEN phys do not count in the total count. REPORTED phys cannot * be enabled but are reported and counted towards the total. - * ENEBLEABLE phys are enabled by default and count towards the total. + * ENABLED phys are enabled by default and count towards the total. * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys * merely specifies the number of phys the host adapter decided to * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, - * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENEBLEABLE. + * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED. * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 * are actually enabled (enabled by default, max number of phys * enableable in this case). 
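As a rough sketch of the HIDDEN/REPORTED/ENABLED accounting described in the comment above (illustrative only, not part of the patch): the helper name and the state[] argument below are hypothetical, and the driver's real parsing is asd_ms_get_phy_params() in the next hunk, which clears bits from a default 0xFF enabled_phys mask rather than building the mask up as this simplified version does.

static void example_count_phys(struct asd_ha_struct *asd_ha,
			       const u8 state[ASD_MAX_PHYS])
{
	int i, reported = 0, enabled = 0;
	u8 mask = 0;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		switch (state[i] & 0x0F) {	/* phy state is the low 4 bits */
		case MS_PHY_STATE_HIDDEN:	/* not counted at all */
			break;
		case MS_PHY_STATE_REPORTED:	/* counted, never enabled */
			reported++;
			break;
		case MS_PHY_STATE_ENABLED:	/* counted and enabled by default */
			mask |= 1 << i;
			enabled++;
			break;
		}
	}
	/* e.g. 3 HIDDEN, 3 REPORTED, 2 ENABLED: num_phys = 5, 2 bits set */
	asd_ha->hw_prof.num_phys = reported + enabled;
	asd_ha->hw_prof.enabled_phys = mask;	/* simplified, see note above */
}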
@@ -816,8 +816,8 @@ static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha, asd_ha->hw_prof.enabled_phys &= ~(1 << i); rep_phys++; continue; - case MS_PHY_STATE_ENABLEABLE: - ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i); + case MS_PHY_STATE_ENABLED: + ASD_DPRINTK("ms: phy%d: ENABLED\n", i); asd_ha->hw_prof.enabled_phys |= (1 << i); en_phys++; break; diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c index 845112539d05..2768fe4d66ba 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.c +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -907,6 +907,16 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) for (i = 0; i < ASD_SCB_SIZE; i += 4) asd_scbsite_write_dword(asd_ha, site_no, i, 0); + /* Initialize SCB Site Opcode field to invalid. */ + asd_scbsite_write_byte(asd_ha, site_no, + offsetof(struct scb_header, opcode), + 0xFF); + + /* Initialize SCB Site Flags field to mean a response + * frame has been received. This means inadvertent + * frames received to be dropped. */ + asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); + /* Workaround needed by SEQ to fix a SATA issue is to exclude * certain SCB sites from the free list. */ if (!SCB_SITE_VALID(site_no)) @@ -922,16 +932,6 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) /* Q_NEXT field of the last SCB is invalidated. */ asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); - /* Initialize SCB Site Opcode field to invalid. */ - asd_scbsite_write_byte(asd_ha, site_no, - offsetof(struct scb_header, opcode), - 0xFF); - - /* Initialize SCB Site Flags field to mean a response - * frame has been received. This means inadvertent - * frames received to be dropped. */ - asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); - first_scb_site_no = site_no; max_scbs++; } @@ -1173,6 +1173,16 @@ static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) set_bit(0, asd_ha->hw_prof.ddb_bitmap); } +static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha) +{ + unsigned int i; + unsigned int ddb_site; + + for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++) + for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) + asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0); +} + /** * asd_seq_setup_seqs -- setup and initialize central and link sequencers * @asd_ha: pointer to host adapter structure @@ -1182,6 +1192,9 @@ static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) int lseq; u8 lseq_mask; + /* Initialize DDB sites */ + asd_seq_init_ddb_sites(asd_ha); + /* Initialize SCB sites. Done first to compute some values which * the rest of the init code depends on. 
*/ asd_init_scb_sites(asd_ha); @@ -1232,6 +1245,13 @@ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) return asd_seq_unpause_lseq(asd_ha, lseq); } +int asd_release_firmware(void) +{ + if (sequencer_fw) + release_firmware(sequencer_fw); + return 0; +} + static int asd_request_firmware(struct asd_ha_struct *asd_ha) { int err, i; @@ -1375,7 +1395,9 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy) u8 phy_is_up; u8 mask; int i, err; + unsigned long flags; + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); for_each_phy(phy_mask, mask, i) asd_ddbsite_write_byte(asd_ha, 0, offsetof(struct asd_ddb_seq_shared, @@ -1395,6 +1417,7 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy) break; } } + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); if (err) asd_printk("couldn't update DDB 0:error:%d\n", err); diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h index 9e715e5496af..9437ff0ae3a4 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.h +++ b/drivers/scsi/aic94xx/aic94xx_seq.h @@ -63,6 +63,7 @@ int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); int asd_init_seqs(struct asd_ha_struct *asd_ha); int asd_start_seqs(struct asd_ha_struct *asd_ha); +int asd_release_firmware(void); void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy); #endif diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index d202ed5a6709..e2ad5bed9403 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -349,6 +349,7 @@ Again: spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { spin_unlock_irqrestore(&task->task_state_lock, flags); @@ -557,6 +558,7 @@ int asd_execute_task(struct sas_task *task, const int num, struct sas_task *t = task; struct asd_ascb *ascb = NULL, *a; struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + unsigned long flags; res = asd_can_queue(asd_ha, num); if (res) @@ -599,6 +601,10 @@ int asd_execute_task(struct sas_task *task, const int num, } if (res) goto out_err_unmap; + + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock_irqrestore(&t->task_state_lock, flags); } list_del_init(&alist); @@ -617,6 +623,9 @@ out_err_unmap: if (a == b) break; t = a->uldd_task; + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + spin_unlock_irqrestore(&t->task_state_lock, flags); switch (t->task_proto) { case SATA_PROTO: case SAS_PROTO_STP: diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 61234384503b..fd5269e086a6 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -566,6 +566,11 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, res = TMF_RESP_FUNC_ESUPP; break; default: + if (tmf == TMF_QUERY_TASK) { + ASD_DPRINTK("%s: QUERY_SSP_TASK response: 0x%x\n", + __FUNCTION__, res); + break; + } ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n", __FUNCTION__, res); res = TMF_RESP_FUNC_FAILED; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b318500785e5..95045e33710d 100644 --- 
a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -595,10 +595,8 @@ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) { int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); - if (pcix_cmd_reg == 0) { - dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); - return -EIO; - } + if (pcix_cmd_reg == 0) + return 0; if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { @@ -627,10 +625,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); return -EIO; } - } else { - dev_err(&ioa_cfg->pdev->dev, - "Failed to setup PCI-X command register\n"); - return -EIO; } return 0; @@ -6314,7 +6308,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) int rc; ENTER; - pci_unblock_user_cfg_access(ioa_cfg->pdev); rc = pci_restore_state(ioa_cfg->pdev); if (rc != PCIBIOS_SUCCESSFUL) { @@ -6355,6 +6348,24 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) } /** + * ipr_reset_bist_done - BIST has completed on the adapter. + * @ipr_cmd: ipr command struct + * + * Description: Unblock config space and resume the reset process. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) +{ + ENTER; + pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); + ipr_cmd->job_step = ipr_reset_restore_cfg_space; + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** * ipr_reset_start_bist - Run BIST on the adapter. * @ipr_cmd: ipr command struct * @@ -6376,7 +6387,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); rc = IPR_RC_JOB_CONTINUE; } else { - ipr_cmd->job_step = ipr_reset_restore_cfg_space; + ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); rc = IPR_RC_JOB_RETURN; } @@ -7166,9 +7177,6 @@ ipr_get_chip_cfg(const struct pci_device_id *dev_id) { int i; - if (dev_id->driver_data) - return (const struct ipr_chip_cfg_t *)dev_id->driver_data; - for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) if (ipr_chip[i].vendor == dev_id->vendor && ipr_chip[i].device == dev_id->device) @@ -7517,65 +7525,43 @@ static void ipr_shutdown(struct pci_dev *pdev) static struct pci_device_id ipr_pci_table[] __devinitdata = { { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, { 
PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, - 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 9f62a1d4d511..88f285de97bb 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -37,8 +37,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.3.0" -#define IPR_DRIVER_DATE "(November 8, 2006)" +#define IPR_DRIVER_VERSION "2.3.1" +#define IPR_DRIVER_DATE "(January 23, 2007)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index f0871c3ac3d9..2aae1b081fcf 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c @@ -123,6 +123,7 @@ lasi700_probe(struct parisc_device *dev) hostdata->force_le_on_be = 0; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; + hostdata->burst_length = 8; } host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); diff --git 
a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index fb7df7b75811..a65598b1e536 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -548,7 +548,7 @@ int sas_discover_sata(struct domain_device *dev) res = sas_notify_lldd_dev_found(dev); if (res) - return res; + goto out_err2; switch (dev->dev_type) { case SATA_DEV: @@ -560,11 +560,23 @@ int sas_discover_sata(struct domain_device *dev) default: break; } + if (res) + goto out_err; sas_notify_lldd_dev_gone(dev); - if (!res) { - sas_notify_lldd_dev_found(dev); - } + res = sas_notify_lldd_dev_found(dev); + if (res) + goto out_err2; + + res = sas_rphy_add(dev->rphy); + if (res) + goto out_err; + + return res; + +out_err: + sas_notify_lldd_dev_gone(dev); +out_err2: return res; } @@ -580,21 +592,17 @@ int sas_discover_end_dev(struct domain_device *dev) res = sas_notify_lldd_dev_found(dev); if (res) - return res; + goto out_err2; res = sas_rphy_add(dev->rphy); if (res) goto out_err; - /* do this to get the end device port attributes which will have - * been scanned in sas_rphy_add */ - sas_notify_lldd_dev_gone(dev); - sas_notify_lldd_dev_found(dev); - return 0; out_err: sas_notify_lldd_dev_gone(dev); +out_err2: return res; } @@ -649,6 +657,7 @@ void sas_unregister_domain_devices(struct asd_sas_port *port) */ static void sas_discover_domain(struct work_struct *work) { + struct domain_device *dev; int error = 0; struct sas_discovery_event *ev = container_of(work, struct sas_discovery_event, work); @@ -658,35 +667,42 @@ static void sas_discover_domain(struct work_struct *work) &port->disc.pending); if (port->port_dev) - return ; - else { - error = sas_get_port_device(port); - if (error) - return; - } + return; + + error = sas_get_port_device(port); + if (error) + return; + dev = port->port_dev; SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, current->pid); - switch (port->port_dev->dev_type) { + switch (dev->dev_type) { case SAS_END_DEV: - error = sas_discover_end_dev(port->port_dev); + error = sas_discover_end_dev(dev); break; case EDGE_DEV: case FANOUT_DEV: - error = sas_discover_root_expander(port->port_dev); + error = sas_discover_root_expander(dev); break; case SATA_DEV: case SATA_PM: - error = sas_discover_sata(port->port_dev); + error = sas_discover_sata(dev); break; default: - SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type); + SAS_DPRINTK("unhandled device %d\n", dev->dev_type); break; } if (error) { - kfree(port->port_dev); /* not kobject_register-ed yet */ + sas_rphy_free(dev->rphy); + dev->rphy = NULL; + + spin_lock(&port->dev_list_lock); + list_del_init(&dev->dev_list_node); + spin_unlock(&port->dev_list_lock); + + kfree(dev); /* not kobject_register-ed yet */ port->port_dev = NULL; } @@ -726,7 +742,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) BUG_ON(ev >= DISC_NUM_EVENTS); sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, - &disc->disc_work[ev].work, port->ha->core.shost); + &disc->disc_work[ev].work, port->ha); return 0; } diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index d83392ee6823..9db30fb5caf2 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) BUG_ON(event >= HA_NUM_EVENTS); sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, - &sas_ha->ha_events[event].work, sas_ha->core.shost); + &sas_ha->ha_events[event].work, sas_ha); } 
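The notify_ha_event() change above (and the matching notify_port_event()/notify_phy_event() hunks that follow) passes the sas_ha_struct down instead of the Scsi_Host so that event queueing can be gated on the HA's registration state. A minimal sketch of that idea, with a hypothetical function name; the real gating is sas_queue_event() in sas_internal.h, updated later in this diff:

/* Sketch only: queue work for an event unless the HA has already been
 * unregistered (sas_unregister_ha() sets SAS_HA_UNREGISTERED under
 * state_lock before flushing the host workqueue). */
static void example_notify_event(struct sas_ha_struct *sas_ha,
				 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&sas_ha->state_lock, flags);
	if (sas_ha->state != SAS_HA_UNREGISTERED)
		scsi_queue_work(sas_ha->core.shost, work);
	spin_unlock_irqrestore(&sas_ha->state_lock, flags);
}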
static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) @@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) BUG_ON(event >= PORT_NUM_EVENTS); sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, - &phy->port_events[event].work, ha->core.shost); + &phy->port_events[event].work, ha); } static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) @@ -51,7 +51,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) BUG_ON(event >= PHY_NUM_EVENTS); sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, - &phy->phy_events[event].work, ha->core.shost); + &phy->phy_events[event].work, ha); } int sas_init_events(struct sas_ha_struct *sas_ha) diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index d31e6fa466f7..d9b9a008d36d 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -667,8 +667,9 @@ static struct domain_device *sas_ex_discover_end_dev( return child; out_list_del: + sas_rphy_free(child->rphy); + child->rphy = NULL; list_del(&child->dev_list_node); - sas_rphy_free(rphy); out_free: sas_port_delete(phy->port); out_err: @@ -1431,13 +1432,22 @@ int sas_discover_root_expander(struct domain_device *dev) int res; struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); - sas_rphy_add(dev->rphy); + res = sas_rphy_add(dev->rphy); + if (res) + goto out_err; ex->level = dev->port->disc.max_level; /* 0 */ res = sas_discover_expander(dev); - if (!res) - sas_ex_bfs_disc(dev->port); + if (res) + goto out_err2; + sas_ex_bfs_disc(dev->port); + + return res; + +out_err2: + sas_rphy_remove(dev->rphy); +out_err: return res; } diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 2f0c07fc3f48..965698c8b7bf 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -87,6 +87,9 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) else if (sas_ha->lldd_queue_size == -1) sas_ha->lldd_queue_size = 128; /* Sanity */ + sas_ha->state = SAS_HA_REGISTERED; + spin_lock_init(&sas_ha->state_lock); + error = sas_register_phys(sas_ha); if (error) { printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); @@ -127,12 +130,22 @@ Undo_phys: int sas_unregister_ha(struct sas_ha_struct *sas_ha) { + unsigned long flags; + + /* Set the state to unregistered to avoid further + * events to be queued */ + spin_lock_irqsave(&sas_ha->state_lock, flags); + sas_ha->state = SAS_HA_UNREGISTERED; + spin_unlock_irqrestore(&sas_ha->state_lock, flags); + scsi_flush_work(sas_ha->core.shost); + + sas_unregister_ports(sas_ha); + if (sas_ha->lldd_max_execute_num > 1) { sas_shutdown_queue(sas_ha); + sas_ha->lldd_max_execute_num = 1; } - sas_unregister_ports(sas_ha); - return 0; } @@ -146,6 +159,36 @@ static int sas_get_linkerrors(struct sas_phy *phy) return sas_smp_get_phy_events(phy); } +int sas_phy_enable(struct sas_phy *phy, int enable) +{ + int ret; + enum phy_func command; + + if (enable) + command = PHY_FUNC_LINK_RESET; + else + command = PHY_FUNC_DISABLE; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + if (!enable) { + sas_phy_disconnected(asd_phy); + sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL); + } + ret = 
i->dft->lldd_control_phy(asd_phy, command, NULL); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, command, NULL); + } + return ret; +} + int sas_phy_reset(struct sas_phy *phy, int hard_reset) { int ret; @@ -172,8 +215,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset) return ret; } -static int sas_set_phy_speed(struct sas_phy *phy, - struct sas_phy_linkrates *rates) +int sas_set_phy_speed(struct sas_phy *phy, + struct sas_phy_linkrates *rates) { int ret; @@ -212,6 +255,7 @@ static int sas_set_phy_speed(struct sas_phy *phy, } static struct sas_function_template sft = { + .phy_enable = sas_phy_enable, .phy_reset = sas_phy_reset, .set_phy_speed = sas_set_phy_speed, .get_linkerrors = sas_get_linkerrors, diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 137d7e496b6d..a78638df2018 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -80,7 +80,7 @@ void sas_hae_reset(struct work_struct *work); static inline void sas_queue_event(int event, spinlock_t *lock, unsigned long *pending, struct work_struct *work, - struct Scsi_Host *shost) + struct sas_ha_struct *sas_ha) { unsigned long flags; @@ -91,7 +91,12 @@ static inline void sas_queue_event(int event, spinlock_t *lock, } __set_bit(event, pending); spin_unlock_irqrestore(lock, flags); - scsi_queue_work(shost, work); + + spin_lock_irqsave(&sas_ha->state_lock, flags); + if (sas_ha->state != SAS_HA_UNREGISTERED) { + scsi_queue_work(sas_ha->core.shost, work); + } + spin_unlock_irqrestore(&sas_ha->state_lock, flags); } static inline void sas_begin_event(int event, spinlock_t *lock, diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 971c37ceecb4..e1e2d085c920 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -42,10 +42,11 @@ static void sas_form_port(struct asd_sas_phy *phy) struct asd_sas_port *port = phy->port; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); + unsigned long flags; if (port) { if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, - SAS_ADDR_SIZE) == 0) + SAS_ADDR_SIZE) != 0) sas_deform_port(phy); else { SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", @@ -56,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy) } /* find a port */ - spin_lock(&sas_ha->phy_port_lock); + spin_lock_irqsave(&sas_ha->phy_port_lock, flags); for (i = 0; i < sas_ha->num_phys; i++) { port = sas_ha->sas_port[i]; spin_lock(&port->phy_list_lock); @@ -78,7 +79,7 @@ static void sas_form_port(struct asd_sas_phy *phy) if (i >= sas_ha->num_phys) { printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", __FUNCTION__); - spin_unlock(&sas_ha->phy_port_lock); + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); return; } @@ -105,7 +106,7 @@ static void sas_form_port(struct asd_sas_phy *phy) } else port->linkrate = max(port->linkrate, phy->linkrate); spin_unlock(&port->phy_list_lock); - spin_unlock(&sas_ha->phy_port_lock); + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); if (!port->port) { port->port = sas_port_alloc(phy->phy->dev.parent, port->id); @@ -137,6 +138,7 @@ void sas_deform_port(struct asd_sas_phy *phy) struct asd_sas_port *port = phy->port; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); + unsigned long flags; if (!port) return; /* done by a phy event */ @@ -155,7 +157,7 @@ void 
sas_deform_port(struct asd_sas_phy *phy) if (si->dft->lldd_port_deformed) si->dft->lldd_port_deformed(phy); - spin_lock(&sas_ha->phy_port_lock); + spin_lock_irqsave(&sas_ha->phy_port_lock, flags); spin_lock(&port->phy_list_lock); list_del_init(&phy->port_phy_el); @@ -174,7 +176,7 @@ void sas_deform_port(struct asd_sas_phy *phy) port->phy_mask = 0; } spin_unlock(&port->phy_list_lock); - spin_unlock(&sas_ha->phy_port_lock); + spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); return; } diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 22672d54aa27..cd2378010c7e 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -34,6 +34,7 @@ #include <scsi/scsi_transport_sas.h> #include "../scsi_sas_internal.h" #include "../scsi_transport_api.h" +#include "../scsi_priv.h" #include <linux/err.h> #include <linux/blkdev.h> @@ -130,7 +131,7 @@ static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) if (cmd->request && blk_rq_tagged(cmd->request)) { if (cmd->device->ordered_tags && (cmd->request->cmd_flags & REQ_HARDBARRIER)) - ta = TASK_ATTR_HOQ; + ta = TASK_ATTR_ORDERED; } return ta; } @@ -281,6 +282,7 @@ enum task_disposition { TASK_IS_ABORTED, TASK_IS_AT_LU, TASK_IS_NOT_AT_LU, + TASK_ABORT_FAILED, }; static enum task_disposition sas_scsi_find_task(struct sas_task *task) @@ -310,15 +312,6 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task) spin_unlock_irqrestore(&core->task_queue_lock, flags); } - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) { - spin_unlock_irqrestore(&task->task_state_lock, flags); - SAS_DPRINTK("%s: task 0x%p already aborted\n", - __FUNCTION__, task); - return TASK_IS_ABORTED; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - for (i = 0; i < 5; i++) { SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); res = si->dft->lldd_abort_task(task); @@ -340,15 +333,21 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task) SAS_DPRINTK("%s: querying task 0x%p\n", __FUNCTION__, task); res = si->dft->lldd_query_task(task); - if (res == TMF_RESP_FUNC_SUCC) { + switch (res) { + case TMF_RESP_FUNC_SUCC: SAS_DPRINTK("%s: task 0x%p at LU\n", __FUNCTION__, task); return TASK_IS_AT_LU; - } else if (res == TMF_RESP_FUNC_COMPLETE) { + case TMF_RESP_FUNC_COMPLETE: SAS_DPRINTK("%s: task 0x%p not at LU\n", __FUNCTION__, task); return TASK_IS_NOT_AT_LU; - } + case TMF_RESP_FUNC_FAILED: + SAS_DPRINTK("%s: task 0x%p failed to abort\n", + __FUNCTION__, task); + return TASK_ABORT_FAILED; + } + } } return res; @@ -398,35 +397,82 @@ static int sas_recover_I_T(struct domain_device *dev) return res; } -void sas_scsi_recover_host(struct Scsi_Host *shost) +/* Find the sas_phy that's attached to this device */ +struct sas_phy *find_local_sas_phy(struct domain_device *dev) +{ + struct domain_device *pdev = dev->parent; + struct ex_phy *exphy = NULL; + int i; + + /* Directly attached device */ + if (!pdev) + return dev->port->phy; + + /* Otherwise look in the expander */ + for (i = 0; i < pdev->ex_dev.num_phys; i++) + if (!memcmp(dev->sas_addr, + pdev->ex_dev.ex_phy[i].attached_sas_addr, + SAS_ADDR_SIZE)) { + exphy = &pdev->ex_dev.ex_phy[i]; + break; + } + + BUG_ON(!exphy); + return exphy->phy; +} + +/* Attempt to send a target reset message to a device */ +int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_phy *phy = 
find_local_sas_phy(dev); + int res; + + res = sas_phy_reset(phy, 1); + if (res) + SAS_DPRINTK("Device reset of %s failed 0x%x\n", + phy->dev.kobj.k_name, + res); + if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) + return SUCCESS; + + return FAILED; +} + +/* Try to reset a device */ +static int try_to_reset_cmd_device(struct Scsi_Host *shost, + struct scsi_cmnd *cmd) +{ + if (!shost->hostt->eh_device_reset_handler) + return FAILED; + + return shost->hostt->eh_device_reset_handler(cmd); +} + +static int sas_eh_handle_sas_errors(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) { - struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); - unsigned long flags; - LIST_HEAD(error_q); struct scsi_cmnd *cmd, *n; enum task_disposition res = TASK_IS_DONE; - int tmf_resp; + int tmf_resp, need_reset; struct sas_internal *i = to_sas_internal(shost->transportt); + unsigned long flags; + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); - spin_lock_irqsave(shost->host_lock, flags); - list_splice_init(&shost->eh_cmd_q, &error_q); - spin_unlock_irqrestore(shost->host_lock, flags); - - SAS_DPRINTK("Enter %s\n", __FUNCTION__); - - /* All tasks on this list were marked SAS_TASK_STATE_ABORTED - * by sas_scsi_timed_out() callback. - */ Again: - SAS_DPRINTK("going over list...\n"); - list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { struct sas_task *task = TO_SAS_TASK(cmd); - list_del_init(&cmd->eh_entry); - if (!task) { - SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__); + if (!task) continue; - } + + list_del_init(&cmd->eh_entry); + + spin_lock_irqsave(&task->task_state_lock, flags); + need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("trying to find task 0x%p\n", task); res = sas_scsi_find_task(task); @@ -437,11 +483,15 @@ Again: SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, task); task->task_done(task); + if (need_reset) + try_to_reset_cmd_device(shost, cmd); continue; case TASK_IS_ABORTED: SAS_DPRINTK("%s: task 0x%p is aborted\n", __FUNCTION__, task); task->task_done(task); + if (need_reset) + try_to_reset_cmd_device(shost, cmd); continue; case TASK_IS_AT_LU: SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); @@ -452,11 +502,14 @@ Again: SAS_ADDR(task->dev), cmd->device->lun); task->task_done(task); - sas_scsi_clear_queue_lu(&error_q, cmd); + if (need_reset) + try_to_reset_cmd_device(shost, cmd); + sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } /* fallthrough */ case TASK_IS_NOT_AT_LU: + case TASK_ABORT_FAILED: SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", task); tmf_resp = sas_recover_I_T(task->dev); @@ -464,7 +517,9 @@ Again: SAS_DPRINTK("I_T %016llx recovered\n", SAS_ADDR(task->dev->sas_addr)); task->task_done(task); - sas_scsi_clear_queue_I_T(&error_q, task->dev); + if (need_reset) + try_to_reset_cmd_device(shost, cmd); + sas_scsi_clear_queue_I_T(work_q, task->dev); goto Again; } /* Hammer time :-) */ @@ -477,7 +532,9 @@ Again: SAS_DPRINTK("clear nexus port:%d " "succeeded\n", port->id); task->task_done(task); - sas_scsi_clear_queue_port(&error_q, + if (need_reset) + try_to_reset_cmd_device(shost, cmd); + sas_scsi_clear_queue_port(work_q, port); goto Again; } @@ -489,6 +546,8 @@ Again: SAS_DPRINTK("clear nexus ha " "succeeded\n"); task->task_done(task); + if (need_reset) + try_to_reset_cmd_device(shost, cmd); goto out; } } @@ -502,20 +561,54 @@ Again: cmd->device->lun); task->task_done(task); + 
if (need_reset) + try_to_reset_cmd_device(shost, cmd); goto clear_q; } } out: - scsi_eh_flush_done_q(&ha->eh_done_q); - SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); - return; + return list_empty(work_q); clear_q: SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); - list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { struct sas_task *task = TO_SAS_TASK(cmd); list_del_init(&cmd->eh_entry); task->task_done(task); } + return list_empty(work_q); +} + +void sas_scsi_recover_host(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + unsigned long flags; + LIST_HEAD(eh_work_q); + + spin_lock_irqsave(shost->host_lock, flags); + list_splice_init(&shost->eh_cmd_q, &eh_work_q); + spin_unlock_irqrestore(shost->host_lock, flags); + + SAS_DPRINTK("Enter %s\n", __FUNCTION__); + /* + * Deal with commands that still have SAS tasks (i.e. they didn't + * complete via the normal sas_task completion mechanism) + */ + if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q)) + goto out; + + /* + * Now deal with SCSI commands that completed ok but have a an error + * code (and hopefully sense data) attached. This is roughly what + * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any + * command we see here has no sas_task and is thus unknown to the HA. + */ + if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) + scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); + +out: + scsi_eh_flush_done_q(&ha->eh_done_q); + SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); + return; } enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) @@ -524,24 +617,30 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) unsigned long flags; if (!task) { - SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n", - cmd, task); - return EH_HANDLED; + cmd->timeout_per_command /= 2; + SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", + cmd, task, (cmd->timeout_per_command ? 
+ "EH_RESET_TIMER" : "EH_NOT_HANDLED")); + if (!cmd->timeout_per_command) + return EH_NOT_HANDLED; + return EH_RESET_TIMER; } spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) { - spin_unlock_irqrestore(&task->task_state_lock, flags); - SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: " - "EH_NOT_HANDLED\n", cmd, task); - return EH_NOT_HANDLED; - } + BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", cmd, task); return EH_HANDLED; } + if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " + "EH_RESET_TIMER\n", + cmd, task); + return EH_RESET_TIMER; + } task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); @@ -557,8 +656,9 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); struct domain_device *found_dev = NULL; int i; + unsigned long flags; - spin_lock(&ha->phy_port_lock); + spin_lock_irqsave(&ha->phy_port_lock, flags); for (i = 0; i < ha->num_phys; i++) { struct asd_sas_port *port = ha->sas_port[i]; struct domain_device *dev; @@ -574,7 +674,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) spin_unlock(&port->dev_list_lock); } found: - spin_unlock(&ha->phy_port_lock); + spin_unlock_irqrestore(&ha->phy_port_lock, flags); return found_dev; } @@ -623,6 +723,8 @@ int sas_slave_configure(struct scsi_device *scsi_dev) scsi_deactivate_tcq(scsi_dev, 1); } + scsi_dev->allow_restart = 1; + return 0; } @@ -799,46 +901,42 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha) spin_unlock_irqrestore(&core->task_queue_lock, flags); } -static int do_sas_task_abort(struct sas_task *task) +/* + * Call the LLDD task abort routine directly. This function is intended for + * use by upper layers that need to tell the LLDD to abort a task. + */ +int __sas_task_abort(struct sas_task *task) { - struct scsi_cmnd *sc = task->uldd_task; struct sas_internal *si = to_sas_internal(task->dev->port->ha->core.shost->transportt); unsigned long flags; int res; spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + if (task->task_state_flags & SAS_TASK_STATE_ABORTED || + task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); - SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__, + SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__, task); return 0; } - - task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED; - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) - task->task_state_flags |= SAS_TASK_STATE_ABORTED; + task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); if (!si->dft->lldd_abort_task) return -ENODEV; res = si->dft->lldd_abort_task(task); + + spin_lock_irqsave(&task->task_state_lock, flags); if ((task->task_state_flags & SAS_TASK_STATE_DONE) || (res == TMF_RESP_FUNC_COMPLETE)) { - /* SMP commands don't have scsi_cmds(?) 
*/ - if (!sc) { - task->task_done(task); - return 0; - } - scsi_req_abort_cmd(sc); - scsi_schedule_eh(sc->device->host); + spin_unlock_irqrestore(&task->task_state_lock, flags); + task->task_done(task); return 0; } - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED; if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); @@ -846,17 +944,24 @@ static int do_sas_task_abort(struct sas_task *task) return -EAGAIN; } -void sas_task_abort(struct work_struct *work) +/* + * Tell an upper layer that it needs to initiate an abort for a given task. + * This should only ever be called by an LLDD. + */ +void sas_task_abort(struct sas_task *task) { - struct sas_task *task = - container_of(work, struct sas_task, abort_work); - int i; + struct scsi_cmnd *sc = task->uldd_task; - for (i = 0; i < 5; i++) - if (!do_sas_task_abort(task)) + /* Escape for libsas internal commands */ + if (!sc) { + if (!del_timer(&task->timer)) return; + task->timer.function(task->timer.data); + return; + } - SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__); + scsi_req_abort_cmd(sc); + scsi_schedule_eh(sc->device->host); } EXPORT_SYMBOL_GPL(sas_queuecommand); @@ -866,5 +971,8 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy); EXPORT_SYMBOL_GPL(sas_change_queue_depth); EXPORT_SYMBOL_GPL(sas_change_queue_type); EXPORT_SYMBOL_GPL(sas_bios_param); +EXPORT_SYMBOL_GPL(__sas_task_abort); EXPORT_SYMBOL_GPL(sas_task_abort); EXPORT_SYMBOL_GPL(sas_phy_reset); +EXPORT_SYMBOL_GPL(sas_phy_enable); +EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index 3052869f51f4..170399ef06f4 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -748,7 +748,7 @@ typedef struct { /** - * private_bios_data - bios private data for boot devices + * struct private_bios_data - bios private data for boot devices * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB, * 0x1000 - 8GB, Others values are invalid * @unused : bits 4-7 are unused diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index b50e27e66024..26e1e6c55654 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h @@ -46,17 +46,17 @@ /** * scb_t - scsi command control block - * @param ccb : command control block for individual driver - * @param list : list of control blocks - * @param gp : general purpose field for LLDs - * @param sno : all SCBs have a serial number - * @param scp : associated scsi command - * @param state : current state of scb - * @param dma_dir : direction of data transfer - * @param dma_type : transfer with sg list, buffer, or no data transfer - * @param dev_channel : actual channel on the device - * @param dev_target : actual target on the device - * @param status : completion status + * @ccb : command control block for individual driver + * @list : list of control blocks + * @gp : general purpose field for LLDs + * @sno : all SCBs have a serial number + * @scp : associated scsi command + * @state : current state of scb + * @dma_dir : direction of data transfer + * @dma_type : transfer with sg list, buffer, or no data transfer + * @dev_channel : actual channel on the device + * @dev_target : actual target on the device + * @status : completion status * * This is our central data structure to issue commands the each driver. 
* Driver specific data structures are maintained in the ccb field. @@ -99,42 +99,42 @@ typedef struct { /** * struct adapter_t - driver's initialization structure - * @param dpc_h : tasklet handle - * @param pdev : pci configuration pointer for kernel - * @param host : pointer to host structure of mid-layer - * @param lock : synchronization lock for mid-layer and driver - * @param quiescent : driver is quiescent for now. - * @param outstanding_cmds : number of commands pending in the driver - * @param kscb_list : pointer to the bulk of SCBs pointers for IO - * @param kscb_pool : pool of free scbs for IO - * @param kscb_pool_lock : lock for pool of free scbs - * @param pend_list : pending commands list - * @param pend_list_lock : exlusion lock for pending commands list - * @param completed_list : list of completed commands - * @param completed_list_lock : exclusion lock for list of completed commands - * @param sglen : max sg elements supported - * @param device_ids : to convert kernel device addr to our devices. - * @param raid_device : raid adapter specific pointer - * @param max_channel : maximum channel number supported - inclusive - * @param max_target : max target supported - inclusive - * @param max_lun : max lun supported - inclusive - * @param unique_id : unique identifier for each adapter - * @param irq : IRQ for this adapter - * @param ito : internal timeout value, (-1) means no timeout - * @param ibuf : buffer to issue internal commands - * @param ibuf_dma_h : dma handle for the above buffer - * @param uscb_list : SCB pointers for user cmds, common mgmt module - * @param uscb_pool : pool of SCBs for user commands - * @param uscb_pool_lock : exclusion lock for these SCBs - * @param max_cmds : max outstanding commands - * @param fw_version : firmware version - * @param bios_version : bios version - * @param max_cdb_sz : biggest CDB size supported. - * @param ha : is high availability present - clustering - * @param init_id : initiator ID, the default value should be 7 - * @param max_sectors : max sectors per request - * @param cmd_per_lun : max outstanding commands per LUN - * @param being_detached : set when unloading, no more mgmt calls + * @aram dpc_h : tasklet handle + * @pdev : pci configuration pointer for kernel + * @host : pointer to host structure of mid-layer + * @lock : synchronization lock for mid-layer and driver + * @quiescent : driver is quiescent for now. + * @outstanding_cmds : number of commands pending in the driver + * @kscb_list : pointer to the bulk of SCBs pointers for IO + * @kscb_pool : pool of free scbs for IO + * @kscb_pool_lock : lock for pool of free scbs + * @pend_list : pending commands list + * @pend_list_lock : exclusion lock for pending commands list + * @completed_list : list of completed commands + * @completed_list_lock : exclusion lock for list of completed commands + * @sglen : max sg elements supported + * @device_ids : to convert kernel device addr to our devices. 
+ * @raid_device : raid adapter specific pointer + * @max_channel : maximum channel number supported - inclusive + * @max_target : max target supported - inclusive + * @max_lun : max lun supported - inclusive + * @unique_id : unique identifier for each adapter + * @irq : IRQ for this adapter + * @ito : internal timeout value, (-1) means no timeout + * @ibuf : buffer to issue internal commands + * @ibuf_dma_h : dma handle for the above buffer + * @uscb_list : SCB pointers for user cmds, common mgmt module + * @uscb_pool : pool of SCBs for user commands + * @uscb_pool_lock : exclusion lock for these SCBs + * @max_cmds : max outstanding commands + * @fw_version : firmware version + * @bios_version : bios version + * @max_cdb_sz : biggest CDB size supported. + * @ha : is high availability present - clustering + * @init_id : initiator ID, the default value should be 7 + * @max_sectors : max sectors per request + * @cmd_per_lun : max outstanding commands per LUN + * @being_detached : set when unloading, no more mgmt calls * * * mraid_setup_device_map() can be called anytime after the device map is @@ -211,23 +211,23 @@ typedef struct { #define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp)) -/** - * MRAID_GET_DEVICE_MAP - device ids - * @param adp - Adapter's soft state - * @param scp - mid-layer scsi command pointer - * @param p_chan - physical channel on the controller - * @param target - target id of the device or logical drive number - * @param islogical - set if the command is for the logical drive - * - * Macro to retrieve information about device class, logical or physical and - * the corresponding physical channel and target or logical drive number - **/ #define MRAID_IS_LOGICAL(adp, scp) \ (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0 #define MRAID_IS_LOGICAL_SDEV(adp, sdev) \ (sdev->channel == (adp)->max_channel) ? 1 : 0 +/** + * MRAID_GET_DEVICE_MAP - device ids + * @adp : adapter's soft state + * @scp : mid-layer scsi command pointer + * @p_chan : physical channel on the controller + * @target : target id of the device or logical drive number + * @islogical : set if the command is for the logical drive + * + * Macro to retrieve information about device class, logical or physical and + * the corresponding physical channel and target or logical drive number + */ #define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ /* \ * Is the request coming for the virtual channel \ @@ -271,10 +271,10 @@ typedef struct { #define ASSERT(expression) #endif -/* +/** * struct mraid_pci_blk - structure holds DMA memory block info - * @param vaddr : virtual address to a memory block - * @param dma_addr : DMA handle to a memory block + * @vaddr : virtual address to a memory block + * @dma_addr : DMA handle to a memory block * * This structure is filled up for the caller. 
It is the responsibilty of the * caller to allocate this array big enough to store addresses for all diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index b8aa34202ec3..706fa05a187a 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -22,23 +22,23 @@ #include "mbox_defs.h" +/* + * console messages debug levels + */ +#define CL_ANN 0 /* print unconditionally, announcements */ +#define CL_DLEVEL1 1 /* debug level 1, informative */ +#define CL_DLEVEL2 2 /* debug level 2, verbose */ +#define CL_DLEVEL3 3 /* debug level 3, very verbose */ + /** * con_log() - console log routine - * @param level : indicates the severity of the message. - * @fparam mt : format string + * @level : indicates the severity of the message. + * @fmt : format string * * con_log displays the error messages on the console based on the current * debug level. Also it attaches the appropriate kernel severity level with * the message. - * - * - * consolge messages debug levels */ -#define CL_ANN 0 /* print unconditionally, announcements */ -#define CL_DLEVEL1 1 /* debug level 1, informative */ -#define CL_DLEVEL2 2 /* debug level 2, verbose */ -#define CL_DLEVEL3 3 /* debug level 3, very verbose */ - #define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt; /* @@ -157,14 +157,14 @@ typedef struct uioc { /** * struct mraid_hba_info - information about the controller * - * @param pci_vendor_id : PCI vendor id - * @param pci_device_id : PCI device id - * @param subsystem_vendor_id : PCI subsystem vendor id - * @param subsystem_device_id : PCI subsystem device id - * @param baseport : base port of hba memory - * @param pci_bus : PCI bus - * @param pci_dev_fn : PCI device/function values - * @param irq : interrupt vector for the device + * @pci_vendor_id : PCI vendor id + * @pci_device_id : PCI device id + * @subsystem_vendor_id : PCI subsystem vendor id + * @subsystem_device_id : PCI subsystem device id + * @baseport : base port of hba memory + * @pci_bus : PCI bus + * @pci_dev_fn : PCI device/function values + * @irq : interrupt vector for the device * * Extended information of 256 bytes about the controller. Align on the single * byte boundary so that 32-bit applications can be run on 64-bit platform diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 7bac86dda88f..04d0b6918c61 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,13 +10,13 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mbox.c - * Version : v2.20.4.9 (Jul 16 2006) + * Version : v2.20.5.1 (Nov 16 2006) * * Authors: - * Atul Mukker <Atul.Mukker@lsil.com> - * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> - * Manoj Jose <Manoj.Jose@lsil.com> - * Seokmann Ju <Seokmann.Ju@lsil.com> + * Atul Mukker <Atul.Mukker@lsi.com> + * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com> + * Manoj Jose <Manoj.Jose@lsi.com> + * Seokmann Ju * * List of supported controllers * @@ -107,6 +107,7 @@ static int megaraid_mbox_support_random_del(adapter_t *); static int megaraid_mbox_get_max_sg(adapter_t *); static void megaraid_mbox_enum_raid_scsi(adapter_t *); static void megaraid_mbox_flush_cache(adapter_t *); +static int megaraid_mbox_fire_sync_cmd(adapter_t *); static void megaraid_mbox_display_scb(adapter_t *, scb_t *); static void megaraid_mbox_setup_device_map(adapter_t *); @@ -137,7 +138,7 @@ static int wait_till_fw_empty(adapter_t *); -MODULE_AUTHOR("sju@lsil.com"); +MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGARAID_VERSION); @@ -146,7 +147,7 @@ MODULE_VERSION(MEGARAID_VERSION); * ### modules parameters for driver ### */ -/** +/* * Set to enable driver to expose unconfigured disk to kernel */ static int megaraid_expose_unconf_disks = 0; @@ -154,7 +155,7 @@ module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0); MODULE_PARM_DESC(unconf_disks, "Set to expose unconfigured disks to kernel (default=0)"); -/** +/* * driver wait time if the adapter's mailbox is busy */ static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; @@ -162,7 +163,7 @@ module_param_named(busy_wait, max_mbox_busy_wait, int, 0); MODULE_PARM_DESC(busy_wait, "Max wait for mailbox in microseconds if busy (default=10)"); -/** +/* * number of sectors per IO command */ static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; @@ -170,7 +171,7 @@ module_param_named(max_sectors, megaraid_max_sectors, int, 0); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command (default=128)"); -/** +/* * number of commands per logical unit */ static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; @@ -179,7 +180,7 @@ MODULE_PARM_DESC(cmd_per_lun, "Maximum number of commands per logical unit (default=64)"); -/** +/* * Fast driver load option, skip scanning for physical devices during load. * This would result in non-disk devices being skipped during driver load * time. These can be later added though, using /proc/scsi/scsi @@ -190,7 +191,7 @@ MODULE_PARM_DESC(fast_load, "Faster loading of the driver, skips physical devices! (default=0)"); -/** +/* * mraid_debug level - threshold for amount of information to be displayed by * the driver. This level can be changed through modules parameters, ioctl or * sysfs/proc interface. By default, print the announcement messages only. @@ -337,7 +338,7 @@ static struct device_attribute *megaraid_sdev_attrs[] = { * * Return value: * actual depth set - **/ + */ static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) { if (qdepth > MBOX_MAX_SCSI_CMDS) @@ -369,8 +370,8 @@ static struct scsi_host_template megaraid_template_g = { * megaraid_init - module load hook * * We register ourselves as hotplug enabled module and let PCI subsystem - * discover our adaters - **/ + * discover our adapters. 
+ */ static int __init megaraid_init(void) { @@ -405,7 +406,7 @@ megaraid_init(void) /** * megaraid_exit - driver unload entry point * - * We simply unwrap the megaraid_init routine here + * We simply unwrap the megaraid_init routine here. */ static void __exit megaraid_exit(void) @@ -421,12 +422,12 @@ megaraid_exit(void) /** * megaraid_probe_one - PCI hotplug entry point - * @param pdev : handle to this controller's PCI configuration space - * @param id : pci device id of the class of controllers + * @pdev : handle to this controller's PCI configuration space + * @id : pci device id of the class of controllers * * This routine should be called whenever a new adapter is detected by the * PCI hotplug susbsytem. - **/ + */ static int __devinit megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -542,16 +543,15 @@ out_probe_one: /** - * megaraid_detach_one - release the framework resources and call LLD release - * routine - * @param pdev : handle for our PCI cofiguration space + * megaraid_detach_one - release framework resources and call LLD release routine + * @pdev : handle for our PCI cofiguration space * * This routine is called during driver unload. We free all the allocated * resources and call the corresponding LLD so that it can also release all * its resources. * - * This routine is also called from the PCI hotplug system - **/ + * This routine is also called from the PCI hotplug system. + */ static void megaraid_detach_one(struct pci_dev *pdev) { @@ -615,9 +615,9 @@ megaraid_detach_one(struct pci_dev *pdev) /** * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA - * @param device : generice driver model device + * @pdev : generic driver model device * - * Shutdown notification, perform flush cache + * Shutdown notification, perform flush cache. */ static void megaraid_mbox_shutdown(struct pci_dev *pdev) @@ -643,10 +643,10 @@ megaraid_mbox_shutdown(struct pci_dev *pdev) /** * megaraid_io_attach - attach a device with the IO subsystem - * @param adapter : controller's soft state + * @adapter : controller's soft state * - * Attach this device with the IO subsystem - **/ + * Attach this device with the IO subsystem. + */ static int megaraid_io_attach(adapter_t *adapter) { @@ -695,10 +695,10 @@ megaraid_io_attach(adapter_t *adapter) /** * megaraid_io_detach - detach a device from the IO subsystem - * @param adapter : controller's soft state + * @adapter : controller's soft state * - * Detach this device from the IO subsystem - **/ + * Detach this device from the IO subsystem. + */ static void megaraid_io_detach(adapter_t *adapter) { @@ -722,13 +722,13 @@ megaraid_io_detach(adapter_t *adapter) /** * megaraid_init_mbox - initialize controller - * @param adapter - our soft state + * @adapter : our soft state * - * . Allocate 16-byte aligned mailbox memory for firmware handshake - * . Allocate controller's memory resources - * . Find out all initialization data - * . Allocate memory required for all the commands - * . 
Use internal library of FW routines, build up complete soft state + * - Allocate 16-byte aligned mailbox memory for firmware handshake + * - Allocate controller's memory resources + * - Find out all initialization data + * - Allocate memory required for all the commands + * - Use internal library of FW routines, build up complete soft state */ static int __devinit megaraid_init_mbox(adapter_t *adapter) @@ -779,33 +779,39 @@ megaraid_init_mbox(adapter_t *adapter) goto out_release_regions; } - // - // Setup the rest of the soft state using the library of FW routines - // + /* initialize the mutual exclusion lock for the mailbox */ + spin_lock_init(&raid_dev->mailbox_lock); - // request IRQ and register the interrupt service routine + /* allocate memory required for commands */ + if (megaraid_alloc_cmd_packets(adapter) != 0) + goto out_iounmap; + + /* + * Issue SYNC cmd to flush the pending cmds in the adapter + * and initialize its internal state + */ + + if (megaraid_mbox_fire_sync_cmd(adapter)) + con_log(CL_ANN, ("megaraid: sync cmd failed\n")); + + /* + * Setup the rest of the soft state using the library of + * FW routines + */ + + /* request IRQ and register the interrupt service routine */ if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", adapter)) { con_log(CL_ANN, (KERN_WARNING "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); + goto out_alloc_cmds; - goto out_iounmap; - } - - - // initialize the mutual exclusion lock for the mailbox - spin_lock_init(&raid_dev->mailbox_lock); - - // allocate memory required for commands - if (megaraid_alloc_cmd_packets(adapter) != 0) { - goto out_free_irq; } // Product info - if (megaraid_mbox_product_info(adapter) != 0) { - goto out_alloc_cmds; - } + if (megaraid_mbox_product_info(adapter) != 0) + goto out_free_irq; // Do we support extended CDBs adapter->max_cdb_sz = 10; @@ -874,9 +880,8 @@ megaraid_init_mbox(adapter_t *adapter) * Allocate resources required to issue FW calls, when sysfs is * accessed */ - if (megaraid_sysfs_alloc_resources(adapter) != 0) { - goto out_alloc_cmds; - } + if (megaraid_sysfs_alloc_resources(adapter) != 0) + goto out_free_irq; // Set the DMA mask to 64-bit. All supported controllers as capable of // DMA in this range @@ -920,10 +925,10 @@ megaraid_init_mbox(adapter_t *adapter) out_free_sysfs_res: megaraid_sysfs_free_resources(adapter); -out_alloc_cmds: - megaraid_free_cmd_packets(adapter); out_free_irq: free_irq(adapter->irq, adapter); +out_alloc_cmds: + megaraid_free_cmd_packets(adapter); out_iounmap: iounmap(raid_dev->baseaddr); out_release_regions: @@ -937,7 +942,7 @@ out_free_raid_dev: /** * megaraid_fini_mbox - undo controller initialization - * @param adapter : our soft state + * @adapter : our soft state */ static void megaraid_fini_mbox(adapter_t *adapter) @@ -967,12 +972,12 @@ megaraid_fini_mbox(adapter_t *adapter) /** * megaraid_alloc_cmd_packets - allocate shared mailbox - * @param adapter : soft state of the raid controller + * @adapter : soft state of the raid controller * * Allocate and align the shared mailbox. This maibox is used to issue * all the commands. For IO based controllers, the mailbox is also regsitered * with the FW. Allocate memory for all commands as well. - * This is our big allocator + * This is our big allocator. 
*/ static int megaraid_alloc_cmd_packets(adapter_t *adapter) @@ -1132,9 +1137,9 @@ out_free_common_mbox: /** * megaraid_free_cmd_packets - free memory - * @param adapter : soft state of the raid controller + * @adapter : soft state of the raid controller * - * Release memory resources allocated for commands + * Release memory resources allocated for commands. */ static void megaraid_free_cmd_packets(adapter_t *adapter) @@ -1156,10 +1161,10 @@ megaraid_free_cmd_packets(adapter_t *adapter) /** * megaraid_mbox_setup_dma_pools - setup dma pool for command packets - * @param adapter : HBA soft state + * @adapter : HBA soft state * - * setup the dma pools for mailbox, passthru and extended passthru structures, - * and scatter-gather lists + * Setup the dma pools for mailbox, passthru and extended passthru structures, + * and scatter-gather lists. */ static int megaraid_mbox_setup_dma_pools(adapter_t *adapter) @@ -1252,10 +1257,10 @@ fail_setup_dma_pool: /** * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets - * @param adapter : HBA soft state + * @adapter : HBA soft state * - * teardown the dma pool for mailbox, passthru and extended passthru - * structures, and scatter-gather lists + * Teardown the dma pool for mailbox, passthru and extended passthru + * structures, and scatter-gather lists. */ static void megaraid_mbox_teardown_dma_pools(adapter_t *adapter) @@ -1300,10 +1305,11 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter) /** * megaraid_alloc_scb - detach and return a scb from the free list * @adapter : controller's soft state + * @scp : pointer to the scsi command to be executed * - * return the scb from the head of the free list. NULL if there are none - * available - **/ + * Return the scb from the head of the free list. %NULL if there are none + * available. + */ static scb_t * megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) { @@ -1337,11 +1343,11 @@ megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) * @adapter : controller's soft state * @scb : scb to be freed * - * return the scb back to the free list of scbs. The caller must 'flush' the + * Return the scb back to the free list of scbs. The caller must 'flush' the * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. * NOTE NOTE: Make sure the scb is not on any list before calling this * routine. - **/ + */ static inline void megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) { @@ -1362,10 +1368,10 @@ megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) /** * megaraid_mbox_mksgl - make the scatter-gather list - * @adapter - controller's soft state - * @scb - scsi control block + * @adapter : controller's soft state + * @scb : scsi control block * - * prepare the scatter-gather list + * Prepare the scatter-gather list. */ static int megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) @@ -1435,10 +1441,10 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) /** * mbox_post_cmd - issue a mailbox command - * @adapter - controller's soft state - * @scb - command to be issued + * @adapter : controller's soft state + * @scb : command to be issued * - * post the command to the controller if mailbox is availble. + * Post the command to the controller if mailbox is available. */ static int mbox_post_cmd(adapter_t *adapter, scb_t *scb) @@ -1518,7 +1524,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb) * Queue entry point for mailbox based controllers. 
*/ static int -megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *)) +megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { adapter_t *adapter; scb_t *scb; @@ -1548,15 +1554,15 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *)) } /** - * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid - * firmware lingua - * @adapter - controller's soft state - * @scp - mid-layer scsi command pointer - * @busy - set if request could not be completed because of lack of + * megaraid_mbox_build_cmd - transform the mid-layer scsi commands + * @adapter : controller's soft state + * @scp : mid-layer scsi command pointer + * @busy : set if request could not be completed because of lack of * resources * - * convert the command issued by mid-layer to format understood by megaraid - * firmware. We also complete certain command without sending them to firmware + * Transform the mid-layer scsi command to megaraid firmware lingua. + * Convert the command issued by mid-layer to format understood by megaraid + * firmware. We also complete certain commands without sending them to firmware. */ static scb_t * megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) @@ -1937,9 +1943,9 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) /** * megaraid_mbox_runpendq - execute commands queued in the pending queue * @adapter : controller's soft state - * @scb : SCB to be queued in the pending list + * @scb_q : SCB to be queued in the pending list * - * scan the pending list for commands which are not yet issued and try to + * Scan the pending list for commands which are not yet issued and try to * post to the controller. The SCB can be a null pointer, which would indicate * no SCB to be queue, just try to execute the ones in the pending list. * @@ -2012,11 +2018,11 @@ megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q) /** * megaraid_mbox_prepare_pthru - prepare a command for physical devices - * @adapter - pointer to controller's soft state - * @scb - scsi control block - * @scp - scsi command from the mid-layer + * @adapter : pointer to controller's soft state + * @scb : scsi control block + * @scp : scsi command from the mid-layer * - * prepare a command for the scsi physical devices + * Prepare a command for the scsi physical devices. */ static void megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, @@ -2060,12 +2066,12 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, /** * megaraid_mbox_prepare_epthru - prepare a command for physical devices - * @adapter - pointer to controller's soft state - * @scb - scsi control block - * @scp - scsi command from the mid-layer + * @adapter : pointer to controller's soft state + * @scb : scsi control block + * @scp : scsi command from the mid-layer * - * prepare a command for the scsi physical devices. This rountine prepares - * commands for devices which can take extended CDBs (>10 bytes) + * Prepare a command for the scsi physical devices. This rountine prepares + * commands for devices which can take extended CDBs (>10 bytes). */ static void megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, @@ -2109,9 +2115,9 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, /** * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs - * @adapter - controller's soft state + * @adapter : controller's soft state * - * Interrupt ackrowledgement sequence for memory mapped HBAs. 
Find out the + * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the * completed command and put them on the completed list for later processing. * * Returns: 1 if the interrupt is valid, 0 otherwise @@ -2224,9 +2230,8 @@ megaraid_ack_sequence(adapter_t *adapter) /** * megaraid_isr - isr for memory based mailbox based controllers - * @irq - irq - * @devp - pointer to our soft state - * @regs - unused + * @irq : irq + * @devp : pointer to our soft state * * Interrupt service routine for memory-mapped mailbox controllers. */ @@ -2671,7 +2676,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp) * the FW is still live, in which case the outstanding commands counter mut go * down to 0. If that happens, also issue the reservation reset command to * relinquish (possible) reservations on the logical drives connected to this - * host + * host. **/ static int megaraid_reset_handler(struct scsi_cmnd *scp) @@ -2823,11 +2828,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp) /** * mbox_post_sync_cmd() - blocking command to the mailbox based controllers - * @adapter - controller's soft state - * @raw_mbox - the mailbox + * @adapter : controller's soft state + * @raw_mbox : the mailbox * * Issue a scb in synchronous and non-interrupt mode for mailbox based - * controllers + * controllers. */ static int mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) @@ -2955,12 +2960,12 @@ blocked_mailbox: /** * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers - * @adapter - controller's soft state - * @raw_mbox - the mailbox + * @adapter : controller's soft state + * @raw_mbox : the mailbox * * Issue a scb in synchronous and non-interrupt mode for mailbox based * controllers. This is a faster version of the synchronous command and - * therefore can be called in interrupt-context as well + * therefore can be called in interrupt-context as well. */ static int mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) @@ -3008,10 +3013,10 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) /** * megaraid_busywait_mbox() - Wait until the controller's mailbox is available - * @raid_dev - RAID device (HBA) soft state + * @raid_dev : RAID device (HBA) soft state * - * wait until the controller's mailbox is available to accept more commands. - * wait for at most 1 second + * Wait until the controller's mailbox is available to accept more commands. + * Wait for at most 1 second. */ static int megaraid_busywait_mbox(mraid_device_t *raid_dev) @@ -3032,9 +3037,9 @@ megaraid_busywait_mbox(mraid_device_t *raid_dev) /** * megaraid_mbox_product_info - some static information about the controller - * @adapter - our soft state + * @adapter : our soft state * - * issue commands to the controller to grab some parameters required by our + * Issue commands to the controller to grab some parameters required by our * caller. */ static int @@ -3157,10 +3162,10 @@ megaraid_mbox_product_info(adapter_t *adapter) /** * megaraid_mbox_extended_cdb - check for support for extended CDBs - * @adapter - soft state for the controller + * @adapter : soft state for the controller * - * this routine check whether the controller in question supports extended - * ( > 10 bytes ) CDBs + * This routine check whether the controller in question supports extended + * ( > 10 bytes ) CDBs. 
*/ static int megaraid_mbox_extended_cdb(adapter_t *adapter) @@ -3193,8 +3198,8 @@ megaraid_mbox_extended_cdb(adapter_t *adapter) /** * megaraid_mbox_support_ha - Do we support clustering - * @adapter - soft state for the controller - * @init_id - ID of the initiator + * @adapter : soft state for the controller + * @init_id : ID of the initiator * * Determine if the firmware supports clustering and the ID of the initiator. */ @@ -3236,9 +3241,9 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id) /** * megaraid_mbox_support_random_del - Do we support random deletion - * @adapter - soft state for the controller + * @adapter : soft state for the controller * - * Determine if the firmware supports random deletion + * Determine if the firmware supports random deletion. * Return: 1 is operation supported, 0 otherwise */ static int @@ -3271,10 +3276,10 @@ megaraid_mbox_support_random_del(adapter_t *adapter) /** * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware - * @adapter - soft state for the controller + * @adapter : soft state for the controller * * Find out the maximum number of scatter-gather elements supported by the - * firmware + * firmware. */ static int megaraid_mbox_get_max_sg(adapter_t *adapter) @@ -3311,10 +3316,10 @@ megaraid_mbox_get_max_sg(adapter_t *adapter) /** * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels - * @adapter - soft state for the controller + * @adapter : soft state for the controller * - * Enumerate the RAID and SCSI channels for ROMB platoforms so that channels - * can be exported as regular SCSI channels + * Enumerate the RAID and SCSI channels for ROMB platforms so that channels + * can be exported as regular SCSI channels. */ static void megaraid_mbox_enum_raid_scsi(adapter_t *adapter) @@ -3348,9 +3353,9 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter) /** * megaraid_mbox_flush_cache - flush adapter and disks cache - * @param adapter : soft state for the controller + * @adapter : soft state for the controller * - * Flush adapter cache followed by disks cache + * Flush adapter cache followed by disks cache. */ static void megaraid_mbox_flush_cache(adapter_t *adapter) @@ -3380,13 +3385,91 @@ megaraid_mbox_flush_cache(adapter_t *adapter) /** + * megaraid_mbox_fire_sync_cmd - fire the sync cmd + * @adapter : soft state for the controller + * + * Clears the pending cmds in FW and reinits its RAID structs. + */ +static int +megaraid_mbox_fire_sync_cmd(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox64_t *mbox64; + int status = 0; + int i; + uint32_t dword; + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = 0xFF; + + mbox64 = raid_dev->mbox64; + mbox = raid_dev->mbox; + + /* Wait until mailbox is free */ + if (megaraid_busywait_mbox(raid_dev) != 0) { + status = 1; + goto blocked_mailbox; + } + + /* Copy mailbox data into host structure */ + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0; + mbox->status = 0; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + /* Wait for maximum 1 min for status to post. 
+ * If the Firmware SUPPORTS the ABOVE COMMAND, + * mbox->cmd will be set to 0 + * else + * the firmware will reject the command with + * mbox->numstatus set to 1 + */ + + i = 0; + status = 0; + while (!mbox->numstatus && mbox->cmd == 0xFF) { + rmb(); + msleep(1); + i++; + if (i > 1000 * 60) { + status = 1; + break; + } + } + if (mbox->numstatus == 1) + status = 1; /*cmd not supported*/ + + /* Check for interrupt line */ + dword = RDOUTDOOR(raid_dev); + WROUTDOOR(raid_dev, dword); + WRINDOOR(raid_dev,2); + + return status; + +blocked_mailbox: + con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n")); + return status; +} + +/** * megaraid_mbox_display_scb - display SCB information, mostly debug purposes - * @param adapter : controllers' soft state - * @param scb : SCB to be displayed - * @param level : debug level for console print + * @adapter : controller's soft state + * @scb : SCB to be displayed + * @level : debug level for console print * * Diplay information about the given SCB iff the current debug level is - * verbose + * verbose. */ static void megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) @@ -3434,7 +3517,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) * scsi addresses and megaraid scsi and logical drive addresses. We export * scsi devices on their actual addresses, whereas the logical drives are * exported on a virtual scsi channel. - **/ + */ static void megaraid_mbox_setup_device_map(adapter_t *adapter) { @@ -3472,7 +3555,7 @@ megaraid_mbox_setup_device_map(adapter_t *adapter) /** * megaraid_cmm_register - register with the mangement module - * @param adapter : HBA soft state + * @adapter : HBA soft state * * Register with the management module, which allows applications to issue * ioctl calls to the drivers. This interface is used by the management module @@ -3562,11 +3645,11 @@ megaraid_cmm_register(adapter_t *adapter) /** * megaraid_cmm_unregister - un-register with the mangement module - * @param adapter : HBA soft state + * @adapter : HBA soft state * * Un-register with the management module. * FIXME: mgmt module must return failure for unregister if it has pending - * commands in LLD + * commands in LLD. */ static int megaraid_cmm_unregister(adapter_t *adapter) @@ -3579,9 +3662,9 @@ megaraid_cmm_unregister(adapter_t *adapter) /** * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD - * @param drvr_data : LLD specific data - * @param kioc : CMM interface packet - * @param action : command action + * @drvr_data : LLD specific data + * @kioc : CMM interface packet + * @action : command action * * This routine is invoked whenever the Common Mangement Module (CMM) has a * command for us. The 'action' parameter specifies if this is a new command @@ -3634,8 +3717,8 @@ megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action) /** * megaraid_mbox_mm_command - issues commands routed through CMM - * @param adapter : HBA soft state - * @param kioc : management command packet + * @adapter : HBA soft state + * @kioc : management command packet * * Issues commands, which are routed through the management module. 
*/ @@ -3804,8 +3887,8 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) /** * gather_hbainfo - HBA characteristics for the applications - * @param adapter : HBA soft state - * @param hinfo : pointer to the caller's host info strucuture + * @adapter : HBA soft state + * @hinfo : pointer to the caller's host info strucuture */ static int gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) @@ -3839,16 +3922,15 @@ gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) /** * megaraid_sysfs_alloc_resources - allocate sysfs related resources + * @adapter : controller's soft state * * Allocate packets required to issue FW calls whenever the sysfs attributes * are read. These attributes would require up-to-date information from the * FW. Also set up resources for mutual exclusion to share these resources and * the wait queue. * - * @param adapter : controller's soft state - * - * @return 0 on success - * @return -ERROR_CODE on failure + * Return 0 on success. + * Return -ERROR_CODE on failure. */ static int megaraid_sysfs_alloc_resources(adapter_t *adapter) @@ -3885,10 +3967,9 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter) /** * megaraid_sysfs_free_resources - free sysfs related resources + * @adapter : controller's soft state * * Free packets allocated for sysfs FW commands - * - * @param adapter : controller's soft state */ static void megaraid_sysfs_free_resources(adapter_t *adapter) @@ -3907,10 +3988,9 @@ megaraid_sysfs_free_resources(adapter_t *adapter) /** * megaraid_sysfs_get_ldmap_done - callback for get ldmap + * @uioc : completed packet * * Callback routine called in the ISR/tasklet context for get ldmap call - * - * @param uioc : completed packet */ static void megaraid_sysfs_get_ldmap_done(uioc_t *uioc) @@ -3926,12 +4006,11 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc) /** * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap + * @data : timed out packet * * Timeout routine to recover and return to application, in case the adapter - * has stopped responding. A timeout of 60 seconds for this command seem like - * a good value - * - * @param uioc : timed out packet + * has stopped responding. A timeout of 60 seconds for this command seems like + * a good value. */ static void megaraid_sysfs_get_ldmap_timeout(unsigned long data) @@ -3948,6 +4027,7 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data) /** * megaraid_sysfs_get_ldmap - get update logical drive map + * @adapter : controller's soft state * * This routine will be called whenever user reads the logical drive * attributes, go get the current logical drive mapping table from the @@ -3959,10 +4039,8 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data) * standalone libary. For now, this should suffice since there is no other * user of this interface. * - * @param adapter : controller's soft state - * - * @return 0 on success - * @return -1 on failure + * Return 0 on success. + * Return -1 on failure. */ static int megaraid_sysfs_get_ldmap(adapter_t *adapter) @@ -4064,13 +4142,12 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter) /** * megaraid_sysfs_show_app_hndl - display application handle for this adapter + * @cdev : class device object representation for the host + * @buf : buffer to send data to * * Display the handle used by the applications while executing management * tasks on the adapter. We invoke a management module API to get the adapter * handle, since we do not interface with applications directly. 
- * - * @param cdev : class device object representation for the host - * @param buf : buffer to send data to */ static ssize_t megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) @@ -4087,16 +4164,18 @@ megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) /** * megaraid_sysfs_show_ldnum - display the logical drive number for this device + * @dev : device object representation for the scsi device + * @attr : device attribute to show + * @buf : buffer to send data to * * Display the logical drive number for the device in question, if it a valid - * logical drive. For physical devices, "-1" is returned - * The logical drive number is displayed in following format + * logical drive. For physical devices, "-1" is returned. + * + * The logical drive number is displayed in following format: * * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> - * <int> <int> <int> <int> * - * @param dev : device object representation for the scsi device - * @param buf : buffer to send data to + * <int> <int> <int> <int> */ static ssize_t megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 2b5a3285f799..9de803cebd4b 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.9" -#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.5.1" +#define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)" /* @@ -146,27 +146,27 @@ typedef struct { /** * mraid_device_t - adapter soft state structure for mailbox controllers - * @param una_mbox64 : 64-bit mbox - unaligned - * @param una_mbox64_dma : mbox dma addr - unaligned - * @param mbox : 32-bit mbox - aligned - * @param mbox64 : 64-bit mbox - aligned - * @param mbox_dma : mbox dma addr - aligned - * @param mailbox_lock : exclusion lock for the mailbox - * @param baseport : base port of hba memory - * @param baseaddr : mapped addr of hba memory - * @param mbox_pool : pool of mailboxes - * @param mbox_pool_handle : handle for the mailbox pool memory - * @param epthru_pool : a pool for extended passthru commands - * @param epthru_pool_handle : handle to the pool above - * @param sg_pool : pool of scatter-gather lists for this driver - * @param sg_pool_handle : handle to the pool above - * @param ccb_list : list of our command control blocks - * @param uccb_list : list of cmd control blocks for mgmt module - * @param umbox64 : array of mailbox for user commands (cmm) - * @param pdrv_state : array for state of each physical drive. 
- * @param last_disp : flag used to show device scanning - * @param hw_error : set if FW not responding - * @param fast_load : If set, skip physical device scanning + * @una_mbox64 : 64-bit mbox - unaligned + * @una_mbox64_dma : mbox dma addr - unaligned + * @mbox : 32-bit mbox - aligned + * @mbox64 : 64-bit mbox - aligned + * @mbox_dma : mbox dma addr - aligned + * @mailbox_lock : exclusion lock for the mailbox + * @baseport : base port of hba memory + * @baseaddr : mapped addr of hba memory + * @mbox_pool : pool of mailboxes + * @mbox_pool_handle : handle for the mailbox pool memory + * @epthru_pool : a pool for extended passthru commands + * @epthru_pool_handle : handle to the pool above + * @sg_pool : pool of scatter-gather lists for this driver + * @sg_pool_handle : handle to the pool above + * @ccb_list : list of our command control blocks + * @uccb_list : list of cmd control blocks for mgmt module + * @umbox64 : array of mailbox for user commands (cmm) + * @pdrv_state : array for state of each physical drive. + * @last_disp : flag used to show device scanning + * @hw_error : set if FW not responding + * @fast_load : If set, skip physical device scanning * @channel_class : channel class, RAID or SCSI * @sysfs_sem : semaphore to serialize access to sysfs res. * @sysfs_uioc : management packet to issue FW calls from sysfs diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index d85b9a8f1b8d..c1ff20c4747d 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -78,10 +78,10 @@ static struct file_operations lsi_fops = { /** * mraid_mm_open - open routine for char node interface - * @inod : unused + * @inode : unused * @filep : unused * - * allow ioctl operations by apps only if they superuser privilege + * Allow ioctl operations by apps only if they have superuser privilege. */ static int mraid_mm_open(struct inode *inode, struct file *filep) @@ -214,7 +214,9 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, /** * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet * @umimd : User space mimd_t ioctl packet - * @adapter : pointer to the adapter (OUT) + * @rval : returned success/error status + * + * The function return value is a pointer to the located @adapter. */ static mraid_mmadp_t * mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) @@ -252,11 +254,11 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) return adapter; } -/* - * handle_drvrcmd - This routine checks if the opcode is a driver - * cmd and if it is, handles it. +/** + * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it. * @arg : packet sent by the user app * @old_ioctl : mimd if 1; uioc otherwise + * @rval : pointer for command's returned value (not function status) */ static int handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) @@ -322,8 +324,8 @@ old_packet: /** * mimd_to_kioc - Converter from old to new ioctl format - * * @umimd : user space old MIMD IOCTL + * @adp : adapter softstate * @kioc : kernel space new format IOCTL * * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. 
The @@ -474,7 +476,6 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) /** * mraid_mm_attch_buf - Attach a free dma buffer for required size - * * @adp : Adapter softstate * @kioc : kioc that the buffer needs to be attached to * @xferlen : required length for buffer @@ -607,7 +608,6 @@ mraid_mm_alloc_kioc(mraid_mmadp_t *adp) /** * mraid_mm_dealloc_kioc - Return kioc to free pool - * * @adp : Adapter softstate * @kioc : uioc_t node to be returned to free pool */ @@ -652,7 +652,6 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) /** * lld_ioctl - Routine to issue ioctl to low level drvr - * * @adp : The adapter handle * @kioc : The ioctl packet with kernel addresses */ @@ -705,7 +704,6 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) /** * ioctl_done - callback from the low level driver - * * @kioc : completed ioctl packet */ static void @@ -756,9 +754,8 @@ ioctl_done(uioc_t *kioc) } -/* - * lld_timedout : callback from the expired timer - * +/** + * lld_timedout - callback from the expired timer * @ptr : ioctl packet that timed out */ static void @@ -776,8 +773,7 @@ lld_timedout(unsigned long ptr) /** - * kioc_to_mimd : Converter from new back to old format - * + * kioc_to_mimd - Converter from new back to old format * @kioc : Kernel space IOCTL packet (successfully issued) * @mimd : User space MIMD packet */ @@ -855,7 +851,6 @@ kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd) /** * hinfo_to_cinfo - Convert new format hba info into old format - * * @hinfo : New format, more comprehensive adapter info * @cinfo : Old format adapter info to support mimd_t apps */ @@ -878,10 +873,9 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo) } -/* - * mraid_mm_register_adp - Registration routine for low level drvrs - * - * @adp : Adapter objejct +/** + * mraid_mm_register_adp - Registration routine for low level drivers + * @lld_adp : Adapter objejct */ int mraid_mm_register_adp(mraid_mmadp_t *lld_adp) @@ -1007,15 +1001,14 @@ memalloc_error: /** * mraid_mm_adapter_app_handle - return the application handle for this adapter + * @unique_id : adapter unique identifier * - * For the given driver data, locate the adadpter in our global list and + * For the given driver data, locate the adapter in our global list and * return the corresponding handle, which is also used by applications to * uniquely identify an adapter. * - * @param unique_id : adapter unique identifier - * - * @return adapter handle if found in the list - * @return 0 if adapter could not be located, should never happen though + * Return adapter handle if found in the list. + * Return 0 if adapter could not be located, should never happen though. */ uint32_t mraid_mm_adapter_app_handle(uint32_t unique_id) @@ -1040,7 +1033,6 @@ mraid_mm_adapter_app_handle(uint32_t unique_id) /** * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter - * * @adp : Adapter softstate * * We maintain a pool of dma buffers per each adapter. Each pool has one @@ -1093,11 +1085,11 @@ dma_pool_setup_error: } -/* +/** * mraid_mm_unregister_adp - Unregister routine for low level drivers - * Assume no outstanding ioctls to llds. - * * @unique_id : UID of the adpater + * + * Assumes no outstanding ioctls to llds. 
*/ int mraid_mm_unregister_adp(uint32_t unique_id) @@ -1131,7 +1123,6 @@ mraid_mm_unregister_adp(uint32_t unique_id) /** * mraid_mm_free_adp_resources - Free adapter softstate - * * @adp : Adapter softstate */ static void @@ -1162,7 +1153,6 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp) /** * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers - * * @adp : Adapter softstate */ static void @@ -1190,7 +1180,7 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) } /** - * mraid_mm_init : Module entry point + * mraid_mm_init - Module entry point */ static int __init mraid_mm_init(void) @@ -1214,10 +1204,13 @@ mraid_mm_init(void) } +#ifdef CONFIG_COMPAT /** - * mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine + * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine + * @filep : file operations pointer (ignored) + * @cmd : ioctl command + * @arg : user ioctl packet */ -#ifdef CONFIG_COMPAT static long mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) @@ -1231,7 +1224,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, #endif /** - * mraid_mm_exit : Module exit point + * mraid_mm_exit - Module exit point */ static void __exit mraid_mm_exit(void) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 55eddcf8eb15..cacb3ad92527 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -15,7 +15,7 @@ #ifndef LSI_MEGARAID_SAS_H #define LSI_MEGARAID_SAS_H -/** +/* * MegaRAID SAS Driver meta data */ #define MEGASAS_VERSION "00.00.03.05" @@ -40,7 +40,7 @@ * "message frames" */ -/** +/* * FW posts its state in upper 4 bits of outbound_msg_0 register */ #define MFI_STATE_MASK 0xF0000000 @@ -58,7 +58,7 @@ #define MEGAMFI_FRAME_SIZE 64 -/** +/* * During FW init, clear pending cmds & reset state using inbound_msg_0 * * ABORT : Abort all pending cmds @@ -78,7 +78,7 @@ MFI_INIT_MFIMODE| \ MFI_INIT_ABORT -/** +/* * MFI frame flags */ #define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 @@ -92,12 +92,12 @@ #define MFI_FRAME_DIR_READ 0x0010 #define MFI_FRAME_DIR_BOTH 0x0018 -/** +/* * Definition for cmd_status */ #define MFI_CMD_STATUS_POLL_MODE 0xFF -/** +/* * MFI command opcodes */ #define MFI_CMD_INIT 0x00 @@ -128,7 +128,7 @@ #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 -/** +/* * MFI command completion codes */ enum MFI_STAT { diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7b18a6c7b7eb..8081b637d97e 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -140,6 +140,8 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off, ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); spin_unlock_irqrestore(&ha->hardware_lock, flags); + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); + return (count); } @@ -653,6 +655,43 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf, return count; } +static ssize_t +qla2x00_optrom_bios_version_show(struct class_device *cdev, char *buf) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], + ha->bios_revision[0]); +} + +static ssize_t +qla2x00_optrom_efi_version_show(struct class_device *cdev, char *buf) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], + ha->efi_revision[0]); +} + +static ssize_t 
+qla2x00_optrom_fcode_version_show(struct class_device *cdev, char *buf) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], + ha->fcode_revision[0]); +} + +static ssize_t +qla2x00_optrom_fw_version_show(struct class_device *cdev, char *buf) +{ + scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev)); + + return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", + ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], + ha->fw_revision[3]); +} + static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); @@ -669,6 +708,14 @@ static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, qla2x00_zio_timer_store); static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, qla2x00_beacon_store); +static CLASS_DEVICE_ATTR(optrom_bios_version, S_IRUGO, + qla2x00_optrom_bios_version_show, NULL); +static CLASS_DEVICE_ATTR(optrom_efi_version, S_IRUGO, + qla2x00_optrom_efi_version_show, NULL); +static CLASS_DEVICE_ATTR(optrom_fcode_version, S_IRUGO, + qla2x00_optrom_fcode_version_show, NULL); +static CLASS_DEVICE_ATTR(optrom_fw_version, S_IRUGO, + qla2x00_optrom_fw_version_show, NULL); struct class_device_attribute *qla2x00_host_attrs[] = { &class_device_attr_driver_version, @@ -683,6 +730,10 @@ struct class_device_attribute *qla2x00_host_attrs[] = { &class_device_attr_zio, &class_device_attr_zio_timer, &class_device_attr_beacon, + &class_device_attr_optrom_bios_version, + &class_device_attr_optrom_efi_version, + &class_device_attr_optrom_fcode_version, + &class_device_attr_optrom_fw_version, NULL, }; @@ -836,21 +887,24 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) link_stat_t stat_buf; struct fc_host_statistics *pfc_host_stat; + rval = QLA_FUNCTION_FAILED; pfc_host_stat = &ha->fc_host_stat; memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, sizeof(stat_buf) / 4, mb_stat); - } else { + } else if (atomic_read(&ha->loop_state) == LOOP_READY && + !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) && + !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) && + !ha->dpc_active) { + /* Must be in a 'READY' state for statistics retrieval. 
*/ rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, mb_stat); } - if (rval != 0) { - qla_printk(KERN_WARNING, ha, - "Unable to retrieve host statistics (%d).\n", mb_stat[0]); - return pfc_host_stat; - } + + if (rval != QLA_SUCCESS) + goto done; pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; @@ -858,7 +912,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; - +done: return pfc_host_stat; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2c10130d9e03..05f4f2a378eb 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2045,6 +2045,29 @@ struct isp_operations { uint32_t, uint32_t); int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); + + int (*get_flash_version) (struct scsi_qla_host *, void *); +}; + +/* MSI-X Support *************************************************************/ + +#define QLA_MSIX_CHIP_REV_24XX 3 +#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) +#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) + +#define QLA_MSIX_DEFAULT 0x00 +#define QLA_MSIX_RSP_Q 0x01 + +#define QLA_MSIX_ENTRIES 2 +#define QLA_MIDX_DEFAULT 0 +#define QLA_MIDX_RSP_Q 1 + +struct scsi_qla_host; + +struct qla_msix_entry { + int have_irq; + uint16_t msix_vector; + uint16_t msix_entry; }; /* @@ -2077,6 +2100,7 @@ typedef struct scsi_qla_host { uint32_t enable_lip_full_login :1; uint32_t enable_target_reset :1; uint32_t enable_led_scheme :1; + uint32_t inta_enabled :1; uint32_t msi_enabled :1; uint32_t msix_enabled :1; uint32_t disable_serdes :1; @@ -2316,8 +2340,6 @@ typedef struct scsi_qla_host { #define MBX_INTR_WAIT 2 #define MBX_UPDATE_FLASH_ACTIVE 3 - spinlock_t mbx_reg_lock; /* Mbx Cmd Register Lock */ - struct semaphore mbx_cmd_sem; /* Serialialize mbx access */ struct semaphore mbx_intr_sem; /* Used for completion notification */ @@ -2358,6 +2380,7 @@ typedef struct scsi_qla_host { uint8_t host_str[16]; uint32_t pci_attr; + uint16_t chip_revision; uint16_t product_id[4]; @@ -2379,6 +2402,15 @@ typedef struct scsi_qla_host { #define QLA_SREADING 1 #define QLA_SWRITING 2 + /* PCI expansion ROM image information. */ +#define ROM_CODE_TYPE_BIOS 0 +#define ROM_CODE_TYPE_FCODE 1 +#define ROM_CODE_TYPE_EFI 3 + uint8_t bios_revision[2]; + uint8_t efi_revision[2]; + uint8_t fcode_revision[16]; + uint32_t fw_revision[4]; + /* Needed for BEACON */ uint16_t beacon_blink_led; uint8_t beacon_color_state; @@ -2391,6 +2423,8 @@ typedef struct scsi_qla_host { uint16_t zio_mode; uint16_t zio_timer; struct fc_host_statistics fc_host_stat; + + struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; } scsi_qla_host_t; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e4dd12f4b80e..74544ae4b0e2 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -224,6 +224,9 @@ extern irqreturn_t qla24xx_intr_handler(int, void *); extern void qla2x00_process_response_queue(struct scsi_qla_host *); extern void qla24xx_process_response_queue(struct scsi_qla_host *); +extern int qla2x00_request_irqs(scsi_qla_host_t *); +extern void qla2x00_free_irqs(scsi_qla_host_t *); + /* * Global Function Prototypes in qla_sup.c source file. 
*/ @@ -259,6 +262,9 @@ extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); +extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); +extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); + /* * Global Function Prototypes in qla_dbg.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b3dac26ddba3..98c01cd5e1a8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -65,7 +65,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha) ha->flags.reset_active = 0; atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&ha->loop_state, LOOP_DOWN); - ha->device_flags = 0; + ha->device_flags = DFLG_NO_CABLE; ha->dpc_flags = 0; ha->flags.management_server_logged_in = 0; ha->marker_needed = 0; @@ -77,16 +77,23 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha) qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); rval = ha->isp_ops.pci_config(ha); if (rval) { - DEBUG2(printk("scsi(%ld): Unable to configure PCI space=n", + DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", ha->host_no)); return (rval); } ha->isp_ops.reset_chip(ha); + ha->isp_ops.get_flash_version(ha, ha->request_ring); + qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); - ha->isp_ops.nvram_config(ha); + rval = ha->isp_ops.nvram_config(ha); + if (rval) { + DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n", + ha->host_no)); + return rval; + } if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings? */ @@ -293,6 +300,8 @@ qla24xx_pci_config(scsi_qla_host_t *ha) d &= ~PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d); + pci_read_config_word(ha->pdev, PCI_REVISION_ID, &ha->chip_revision); + /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->pci_attr = RD_REG_DWORD(®->ctrl_status); @@ -1351,6 +1360,39 @@ qla2x00_configure_hba(scsi_qla_host_t *ha) return(rval); } +static inline void +qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def) +{ + char *st, *en; + uint16_t index; + + if (memcmp(model, BINZERO, len) != 0) { + strncpy(ha->model_number, model, len); + st = en = ha->model_number; + en += len - 1; + while (en > st) { + if (*en != 0x20 && *en != 0x00) + break; + *en-- = '\0'; + } + + index = (ha->pdev->subsystem_device & 0xff); + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + index < QLA_MODEL_NAMES) + ha->model_desc = qla2x00_model_name[index * 2 + 1]; + } else { + index = (ha->pdev->subsystem_device & 0xff); + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + index < QLA_MODEL_NAMES) { + strcpy(ha->model_number, + qla2x00_model_name[index * 2]); + ha->model_desc = qla2x00_model_name[index * 2 + 1]; + } else { + strcpy(ha->model_number, def); + } + } +} + /* * NVRAM configuration for ISP 2xxx * @@ -1367,7 +1409,6 @@ qla2x00_configure_hba(scsi_qla_host_t *ha) int qla2x00_nvram_config(scsi_qla_host_t *ha) { - int rval; uint8_t chksum = 0; uint16_t cnt; uint8_t *dptr1, *dptr2; @@ -1376,8 +1417,6 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) uint8_t *ptr = (uint8_t *)ha->request_ring; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - rval = QLA_SUCCESS; - /* Determine NVRAM starting address. 
*/ ha->nvram_size = sizeof(nvram_t); ha->nvram_base = 0; @@ -1401,55 +1440,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); - qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " - "invalid -- WWPN) defaults.\n"); - - /* - * Set default initialization control block. - */ - memset(nv, 0, ha->nvram_size); - nv->parameter_block_version = ICB_VERSION; - - if (IS_QLA23XX(ha)) { - nv->firmware_options[0] = BIT_2 | BIT_1; - nv->firmware_options[1] = BIT_7 | BIT_5; - nv->add_firmware_options[0] = BIT_5; - nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = __constant_cpu_to_le16(2048); - nv->special_options[1] = BIT_7; - } else if (IS_QLA2200(ha)) { - nv->firmware_options[0] = BIT_2 | BIT_1; - nv->firmware_options[1] = BIT_7 | BIT_5; - nv->add_firmware_options[0] = BIT_5; - nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = __constant_cpu_to_le16(1024); - } else if (IS_QLA2100(ha)) { - nv->firmware_options[0] = BIT_3 | BIT_1; - nv->firmware_options[1] = BIT_5; - nv->frame_payload_size = __constant_cpu_to_le16(1024); - } - - nv->max_iocb_allocation = __constant_cpu_to_le16(256); - nv->execution_throttle = __constant_cpu_to_le16(16); - nv->retry_count = 8; - nv->retry_delay = 1; - - nv->port_name[0] = 33; - nv->port_name[3] = 224; - nv->port_name[4] = 139; - - nv->login_timeout = 4; - - /* - * Set default host adapter parameters - */ - nv->host_p[1] = BIT_2; - nv->reset_delay = 5; - nv->port_down_retry_count = 8; - nv->max_luns_per_target = __constant_cpu_to_le16(8); - nv->link_down_timeout = 60; - - rval = 1; + return QLA_FUNCTION_FAILED; } #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) @@ -1489,33 +1480,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) strcpy(ha->model_number, "QLA2300"); } } else { - if (rval == 0 && - memcmp(nv->model_number, BINZERO, - sizeof(nv->model_number)) != 0) { - char *st, *en; - - strncpy(ha->model_number, nv->model_number, - sizeof(nv->model_number)); - st = en = ha->model_number; - en += sizeof(nv->model_number) - 1; - while (en > st) { - if (*en != 0x20 && *en != 0x00) - break; - *en-- = '\0'; - } - } else { - uint16_t index; - - index = (ha->pdev->subsystem_device & 0xff); - if (index < QLA_MODEL_NAMES) { - strcpy(ha->model_number, - qla2x00_model_name[index * 2]); - ha->model_desc = - qla2x00_model_name[index * 2 + 1]; - } else { - strcpy(ha->model_number, "QLA23xx"); - } - } + qla2x00_set_model_info(ha, nv->model_number, + sizeof(nv->model_number), "QLA23xx"); } } else if (IS_QLA2200(ha)) { nv->firmware_options[0] |= BIT_2; @@ -1687,11 +1653,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) } } - if (rval) { - DEBUG2_3(printk(KERN_WARNING - "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); - } - return (rval); + return QLA_SUCCESS; } static void @@ -3107,7 +3069,11 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) } spin_unlock_irqrestore(&ha->hardware_lock, flags); - ha->isp_ops.nvram_config(ha); + ha->isp_ops.get_flash_version(ha, ha->request_ring); + + rval = ha->isp_ops.nvram_config(ha); + if (rval) + goto isp_abort_retry; if (!qla2x00_restart_isp(ha)) { clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); @@ -3137,6 +3103,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) } } } else { /* failed the ISP abort */ +isp_abort_retry: ha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { @@ -3326,7 +3293,6 @@ 
qla24xx_reset_adapter(scsi_qla_host_t *ha) int qla24xx_nvram_config(scsi_qla_host_t *ha) { - int rval; struct init_cb_24xx *icb; struct nvram_24xx *nv; uint32_t *dptr; @@ -3334,7 +3300,6 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) uint32_t chksum; uint16_t cnt; - rval = QLA_SUCCESS; icb = (struct init_cb_24xx *)ha->init_cb; nv = (struct nvram_24xx *)ha->request_ring; @@ -3367,51 +3332,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], le16_to_cpu(nv->nvram_version)); - qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " - "invalid -- WWPN) defaults.\n"); - - /* - * Set default initialization control block. - */ - memset(nv, 0, ha->nvram_size); - nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); - nv->version = __constant_cpu_to_le16(ICB_VERSION); - nv->frame_payload_size = __constant_cpu_to_le16(2048); - nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); - nv->exchange_count = __constant_cpu_to_le16(0); - nv->hard_address = __constant_cpu_to_le16(124); - nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); - nv->port_name[2] = 0x00; - nv->port_name[3] = 0xe0; - nv->port_name[4] = 0x8b; - nv->port_name[5] = 0x1c; - nv->port_name[6] = 0x55; - nv->port_name[7] = 0x86; - nv->node_name[0] = 0x20; - nv->node_name[1] = 0x00; - nv->node_name[2] = 0x00; - nv->node_name[3] = 0xe0; - nv->node_name[4] = 0x8b; - nv->node_name[5] = 0x1c; - nv->node_name[6] = 0x55; - nv->node_name[7] = 0x86; - nv->login_retry_count = __constant_cpu_to_le16(8); - nv->interrupt_delay_timer = __constant_cpu_to_le16(0); - nv->login_timeout = __constant_cpu_to_le16(0); - nv->firmware_options_1 = - __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); - nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); - nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); - nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); - nv->efi_parameters = __constant_cpu_to_le32(0); - nv->reset_delay = 5; - nv->max_luns_per_target = __constant_cpu_to_le16(128); - nv->port_down_retry_count = __constant_cpu_to_le16(30); - nv->link_down_timeout = __constant_cpu_to_le16(30); - - rval = 1; + return QLA_FUNCTION_FAILED; } /* Reset Initialization control block */ @@ -3438,25 +3359,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) /* * Setup driver NVRAM options. */ - if (memcmp(nv->model_name, BINZERO, sizeof(nv->model_name)) != 0) { - char *st, *en; - uint16_t index; - - strncpy(ha->model_number, nv->model_name, - sizeof(nv->model_name)); - st = en = ha->model_number; - en += sizeof(nv->model_name) - 1; - while (en > st) { - if (*en != 0x20 && *en != 0x00) - break; - *en-- = '\0'; - } - - index = (ha->pdev->subsystem_device & 0xff); - if (index < QLA_MODEL_NAMES) - ha->model_desc = qla2x00_model_name[index * 2 + 1]; - } else - strcpy(ha->model_number, "QLA2462"); + qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name), + "QLA2462"); /* Use alternate WWN? 
*/ if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { @@ -3575,11 +3479,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) ha->flags.process_response_queue = 1; } - if (rval) { - DEBUG2_3(printk(KERN_WARNING - "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); - } - return (rval); + return QLA_SUCCESS; } static int diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 39fd17b05be5..d4885616cd39 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -86,12 +86,8 @@ qla2100_intr_handler(int irq, void *dev_id) if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - spin_lock_irqsave(&ha->mbx_reg_lock, flags); - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); up(&ha->mbx_intr_sem); - - spin_unlock_irqrestore(&ha->mbx_reg_lock, flags); } return (IRQ_HANDLED); @@ -199,12 +195,8 @@ qla2300_intr_handler(int irq, void *dev_id) if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - spin_lock_irqsave(&ha->mbx_reg_lock, flags); - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); up(&ha->mbx_intr_sem); - - spin_unlock_irqrestore(&ha->mbx_reg_lock, flags); } return (IRQ_HANDLED); @@ -654,10 +646,8 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) fcport->last_queue_full + ql2xqfullrampup * HZ)) return; - spin_unlock_irq(&ha->hardware_lock); starget_for_each_device(sdev->sdev_target, fcport, qla2x00_adjust_sdev_qdepth_up); - spin_lock_irq(&ha->hardware_lock); } /** @@ -927,10 +917,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) /* Adjust queue depth for all luns on the port. */ fcport->last_queue_full = jiffies; - spin_unlock_irq(&ha->hardware_lock); starget_for_each_device(cp->device->sdev_target, fcport, qla2x00_adjust_sdev_qdepth_down); - spin_lock_irq(&ha->hardware_lock); break; } if (lscsi_status != SS_CHECK_CONDITION) @@ -995,6 +983,22 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) if (lscsi_status != 0) { cp->result = DID_OK << 16 | lscsi_status; + if (lscsi_status == SAM_STAT_TASK_SET_FULL) { + DEBUG2(printk(KERN_INFO + "scsi(%ld): QUEUE FULL status detected " + "0x%x-0x%x.\n", ha->host_no, comp_status, + scsi_status)); + + /* + * Adjust queue depth for all luns on the + * port. 
+ */ + fcport->last_queue_full = jiffies; + starget_for_each_device( + cp->device->sdev_target, fcport, + qla2x00_adjust_sdev_qdepth_down); + break; + } if (lscsi_status != SS_CHECK_CONDITION) break; @@ -1482,12 +1486,8 @@ qla24xx_intr_handler(int irq, void *dev_id) if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - spin_lock_irqsave(&ha->mbx_reg_lock, flags); - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); up(&ha->mbx_intr_sem); - - spin_unlock_irqrestore(&ha->mbx_reg_lock, flags); } return IRQ_HANDLED; @@ -1536,3 +1536,216 @@ qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt) qla2x00_sp_compl(ha, sp); } +static irqreturn_t +qla24xx_msix_rsp_q(int irq, void *dev_id) +{ + scsi_qla_host_t *ha; + struct device_reg_24xx __iomem *reg; + unsigned long flags; + + ha = dev_id; + reg = &ha->iobase->isp24; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + qla24xx_process_response_queue(ha); + + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD_RELAXED(&reg->hccr); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t +qla24xx_msix_default(int irq, void *dev_id) +{ + scsi_qla_host_t *ha; + struct device_reg_24xx __iomem *reg; + int status; + unsigned long flags; + unsigned long iter; + uint32_t stat; + uint32_t hccr; + uint16_t mb[4]; + + ha = dev_id; + reg = &ha->iobase->isp24; + status = 0; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (iter = 50; iter--; ) { + stat = RD_REG_DWORD(&reg->host_status); + if (stat & HSRX_RISC_PAUSED) { + hccr = RD_REG_DWORD(&reg->hccr); + + qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " + "Dumping firmware!\n", hccr); + ha->isp_ops.fw_dump(ha, 1); + set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); + break; + } else if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case 0x1: + case 0x2: + case 0x10: + case 0x11: + qla24xx_mbx_completion(ha, MSW(stat)); + status |= MBX_INTERRUPT; + + break; + case 0x12: + mb[0] = MSW(stat); + mb[1] = RD_REG_WORD(&reg->mailbox1); + mb[2] = RD_REG_WORD(&reg->mailbox2); + mb[3] = RD_REG_WORD(&reg->mailbox3); + qla2x00_async_event(ha, mb); + break; + case 0x13: + qla24xx_process_response_queue(ha); + break; + default: + DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " + "(%d).\n", + ha->host_no, stat & 0xff)); + break; + } + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD_RELAXED(&reg->hccr); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + up(&ha->mbx_intr_sem); + } + + return IRQ_HANDLED; +} + +/* Interrupt handling helpers. 
*/ + +struct qla_init_msix_entry { + uint16_t entry; + uint16_t index; + const char *name; + irqreturn_t (*handler)(int, void *); +}; + +static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { + { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, + "qla2xxx (default)", qla24xx_msix_default }, + + { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, + "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, +}; + +static void +qla24xx_disable_msix(scsi_qla_host_t *ha) +{ + int i; + struct qla_msix_entry *qentry; + + for (i = 0; i < QLA_MSIX_ENTRIES; i++) { + qentry = &ha->msix_entries[imsix_entries[i].index]; + if (qentry->have_irq) + free_irq(qentry->msix_vector, ha); + } + pci_disable_msix(ha->pdev); +} + +static int +qla24xx_enable_msix(scsi_qla_host_t *ha) +{ + int i, ret; + struct msix_entry entries[QLA_MSIX_ENTRIES]; + struct qla_msix_entry *qentry; + + for (i = 0; i < QLA_MSIX_ENTRIES; i++) + entries[i].entry = imsix_entries[i].entry; + + ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); + if (ret) { + qla_printk(KERN_WARNING, ha, + "MSI-X: Failed to enable support -- %d/%d\n", + QLA_MSIX_ENTRIES, ret); + goto msix_out; + } + ha->flags.msix_enabled = 1; + + for (i = 0; i < QLA_MSIX_ENTRIES; i++) { + qentry = &ha->msix_entries[imsix_entries[i].index]; + qentry->msix_vector = entries[i].vector; + qentry->msix_entry = entries[i].entry; + qentry->have_irq = 0; + ret = request_irq(qentry->msix_vector, + imsix_entries[i].handler, 0, imsix_entries[i].name, ha); + if (ret) { + qla_printk(KERN_WARNING, ha, + "MSI-X: Unable to register handler -- %x/%d.\n", + imsix_entries[i].index, ret); + qla24xx_disable_msix(ha); + goto msix_out; + } + qentry->have_irq = 1; + } + +msix_out: + return ret; +} + +int +qla2x00_request_irqs(scsi_qla_host_t *ha) +{ + int ret; + + /* If possible, enable MSI-X. 
*/ + if (!IS_QLA2432(ha)) + goto skip_msix; + + if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || + !QLA_MSIX_FW_MODE_1(ha->fw_attributes)) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", + ha->chip_revision, ha->fw_attributes)); + + goto skip_msix; + } + + ret = qla24xx_enable_msix(ha); + if (!ret) { + DEBUG2(qla_printk(KERN_INFO, ha, + "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, + ha->fw_attributes)); + return ret; + } + qla_printk(KERN_WARNING, ha, + "MSI-X: Falling back-to INTa mode -- %d.\n", ret); +skip_msix: + ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler, + IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); + if (!ret) { + ha->flags.inta_enabled = 1; + ha->host->irq = ha->pdev->irq; + } else { + qla_printk(KERN_WARNING, ha, + "Failed to reserve interrupt %d already in use.\n", + ha->pdev->irq); + } + + return ret; +} + +void +qla2x00_free_irqs(scsi_qla_host_t *ha) +{ + + if (ha->flags.msix_enabled) + qla24xx_disable_msix(ha); + else if (ha->flags.inta_enabled) + free_irq(ha->host->irq, ha); +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 077e5789beeb..83376f6ac3db 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -55,7 +55,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; - unsigned long mbx_flags = 0; unsigned long wait_time; rval = QLA_SUCCESS; @@ -81,10 +80,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) /* Save mailbox command for debug */ ha->mcp = mcp; - /* Try to get mailbox register access */ - if (!abort_active) - spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags); - DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", ha->host_no, mcp->mb[0])); @@ -161,9 +156,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (!abort_active) - spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags); - /* Wait for either the timer to expire * or the mbox completion interrupt */ @@ -184,8 +176,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) else WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (!abort_active) - spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { @@ -201,9 +191,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) } /* while */ } - if (!abort_active) - spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags); - /* Check whether we timed out */ if (ha->flags.mbox_int) { uint16_t *iptr2; @@ -256,9 +243,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) rval = QLA_FUNCTION_TIMEOUT; } - if (!abort_active) - spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags); - ha->flags.mbox_busy = 0; /* Clean up */ @@ -1713,7 +1697,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, lg->entry_count = 1; lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = - __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_EXPL_LOGO); + __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d6445ae841ba..68f5d24b938b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1485,6 
+1485,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->isp_ops.fw_dump = qla2100_fw_dump; ha->isp_ops.read_optrom = qla2x00_read_optrom_data; ha->isp_ops.write_optrom = qla2x00_write_optrom_data; + ha->isp_ops.get_flash_version = qla2x00_get_flash_version; if (IS_QLA2100(ha)) { host->max_id = MAX_TARGETS_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; @@ -1550,6 +1551,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->isp_ops.beacon_on = qla24xx_beacon_on; ha->isp_ops.beacon_off = qla24xx_beacon_off; ha->isp_ops.beacon_blink = qla24xx_beacon_blink; + ha->isp_ops.get_flash_version = qla24xx_get_flash_version; ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_24XX; } @@ -1564,14 +1566,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ha->list); INIT_LIST_HEAD(&ha->fcports); - /* - * These locks are used to prevent more than one CPU - * from modifying the queue at the same time. The - * higher level "host_lock" will reduce most - * contention for these locks. - */ - spin_lock_init(&ha->mbx_reg_lock); - qla2x00_config_dma_addressing(ha); if (qla2x00_mem_alloc(ha)) { qla_printk(KERN_WARNING, ha, @@ -1615,15 +1609,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_lun = MAX_LUNS; host->transportt = qla2xxx_transport_template; - ret = request_irq(pdev->irq, ha->isp_ops.intr_handler, - IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); - if (ret) { - qla_printk(KERN_WARNING, ha, - "Failed to reserve interrupt %d already in use.\n", - pdev->irq); + ret = qla2x00_request_irqs(ha); + if (ret) goto probe_failed; - } - host->irq = pdev->irq; /* Initialized the timer */ qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); @@ -1753,9 +1741,7 @@ qla2x00_free_device(scsi_qla_host_t *ha) qla2x00_mem_free(ha); - /* Detach interrupts */ - if (ha->host->irq) - free_irq(ha->host->irq, ha); + qla2x00_free_irqs(ha); /* release io space registers */ if (ha->iobase) diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 15390ad87456..ff1dd4175a7f 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -611,7 +611,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, flash_conf_to_access_addr(0x0339), (fdata & 0xff00) | ((fdata << 16) & 0xff0000) | ((fdata >> 16) & 0xff)); - fdata = (faddr & sec_mask) << 2; ret = qla24xx_write_flash_dword(ha, conf_addr, (fdata & 0xff00) |((fdata << 16) & 0xff0000) | ((fdata >> 16) & 0xff)); @@ -1383,6 +1382,29 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, qla2x00_write_flash_byte(ha, 0x5555, 0xf0); } +static void +qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr, + uint32_t length) +{ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint32_t midpoint, ilength; + uint8_t data; + + midpoint = length / 2; + + WRT_REG_WORD(&reg->nvram, 0); + RD_REG_WORD(&reg->nvram); + for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) { + if (ilength == midpoint) { + WRT_REG_WORD(&reg->nvram, NVR_SELECT); + RD_REG_WORD(&reg->nvram); + } + data = qla2x00_read_flash_byte(ha, saddr); + if (saddr % 100) + udelay(10); + *tmp_buf = data; + } +} static inline void qla2x00_suspend_hba(struct scsi_qla_host *ha) @@ -1722,3 +1744,327 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, return rval; } + +/** + * qla2x00_get_fcode_version() - Determine an FCODE image's version. 
+ * @ha: HA context + * @pcids: Pointer to the FCODE PCI data structure + * + * The process of retrieving the FCODE version information is at best + * described as interesting. + * + * Within the first 100h bytes of the image an ASCII string is present + * which contains several pieces of information including the FCODE + * version. Unfortunately it seems the only reliable way to retrieve + * the version is by scanning for another sentinel within the string, + * the FCODE build date: + * + * ... 2.00.02 10/17/02 ... + * + * Returns QLA_SUCCESS on successful retrieval of version. + */ +static void +qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t istart, iend, iter, vend; + uint8_t do_next, rbyte, *vbyte; + + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + + /* Skip the PCI data structure. */ + istart = pcids + + ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) | + qla2x00_read_flash_byte(ha, pcids + 0x0A)); + iend = istart + 0x100; + do { + /* Scan for the sentinel date string...eeewww. */ + do_next = 0; + iter = istart; + while ((iter < iend) && !do_next) { + iter++; + if (qla2x00_read_flash_byte(ha, iter) == '/') { + if (qla2x00_read_flash_byte(ha, iter + 2) == + '/') + do_next++; + else if (qla2x00_read_flash_byte(ha, + iter + 3) == '/') + do_next++; + } + } + if (!do_next) + break; + + /* Backtrack to previous ' ' (space). */ + do_next = 0; + while ((iter > istart) && !do_next) { + iter--; + if (qla2x00_read_flash_byte(ha, iter) == ' ') + do_next++; + } + if (!do_next) + break; + + /* + * Mark end of version tag, and find previous ' ' (space) or + * string length (recent FCODE images -- major hack ahead!!!). + */ + vend = iter - 1; + do_next = 0; + while ((iter > istart) && !do_next) { + iter--; + rbyte = qla2x00_read_flash_byte(ha, iter); + if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10) + do_next++; + } + if (!do_next) + break; + + /* Mark beginning of version tag, and copy data. */ + iter++; + if ((vend - iter) && + ((vend - iter) < sizeof(ha->fcode_revision))) { + vbyte = ha->fcode_revision; + while (iter <= vend) { + *vbyte++ = qla2x00_read_flash_byte(ha, iter); + iter++; + } + ret = QLA_SUCCESS; + } + } while (0); + + if (ret != QLA_SUCCESS) + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); +} + +int +qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint8_t code_type, last_image; + uint32_t pcihdr, pcids; + uint8_t *dbyte; + uint16_t *dcode; + + if (!ha->pio_address || !mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + qla2x00_flash_enable(ha); + + /* Begin with first PCI expansion ROM header. */ + pcihdr = 0; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || + qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { + /* No signature */ + DEBUG2(printk("scsi(%ld): No matching ROM " + "signature.\n", ha->host_no)); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + + ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) | + qla2x00_read_flash_byte(ha, pcihdr + 0x18)); + + /* Validate signature of PCI data structure. 
*/ + if (qla2x00_read_flash_byte(ha, pcids) != 'P' || + qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' || + qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || + qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { + /* Incorrect header. */ + DEBUG2(printk("%s(): PCI data struct not found " + "pcir_adr=%x.\n", __func__, pcids)); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = qla2x00_read_flash_byte(ha, pcids + 0x14); + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = + qla2x00_read_flash_byte(ha, pcids + 0x12); + ha->bios_revision[1] = + qla2x00_read_flash_byte(ha, pcids + 0x13); + DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, + ha->bios_revision[1], ha->bios_revision[0])); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + /* Eeeewww... */ + qla2x00_get_fcode_version(ha, pcids); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). */ + ha->efi_revision[0] = + qla2x00_read_flash_byte(ha, pcids + 0x12); + ha->efi_revision[1] = + qla2x00_read_flash_byte(ha, pcids + 0x13); + DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, + ha->efi_revision[1], ha->efi_revision[0])); + break; + default: + DEBUG2(printk("%s(): Unrecognized code type %x at " + "pcids %x.\n", __func__, code_type, pcids)); + break; + } + + last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) | + qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512; + } while (!last_image); + + if (IS_QLA2322(ha)) { + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dbyte = mbuf; + memset(dbyte, 0, 8); + dcode = (uint16_t *)dbyte; + + qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, + 8); + DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", + __func__, ha->host_no)); + DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); + + if ((dcode[0] == 0xffff && dcode[1] == 0xffff && + dcode[2] == 0xffff && dcode[3] == 0xffff) || + (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && + dcode[3] == 0)) { + DEBUG2(printk("%s(): Unrecognized fw revision at " + "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); + } else { + /* values are in big endian */ + ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; + ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; + ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; + } + } + + qla2x00_flash_disable(ha); + + return ret; +} + +int +qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint32_t pcihdr, pcids; + uint32_t *dcode; + uint8_t *bcode; + uint8_t code_type, last_image; + int i; + + if (!mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + dcode = mbuf; + + /* Begin with first PCI expansion ROM header. */ + pcihdr = 0; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); + bcode = mbuf + (pcihdr % 4); + if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { + /* No signature */ + DEBUG2(printk("scsi(%ld): No matching ROM " + "signature.\n", ha->host_no)); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. 
*/ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + + qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || + bcode[0x2] != 'I' || bcode[0x3] != 'R') { + /* Incorrect header. */ + DEBUG2(printk("%s(): PCI data struct not found " + "pcir_adr=%x.\n", __func__, pcids)); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = bcode[0x14]; + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = bcode[0x12]; + ha->bios_revision[1] = bcode[0x13]; + DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, + ha->bios_revision[1], ha->bios_revision[0])); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + ha->fcode_revision[0] = bcode[0x12]; + ha->fcode_revision[1] = bcode[0x13]; + DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__, + ha->fcode_revision[1], ha->fcode_revision[0])); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). */ + ha->efi_revision[0] = bcode[0x12]; + ha->efi_revision[1] = bcode[0x13]; + DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, + ha->efi_revision[1], ha->efi_revision[0])); + break; + default: + DEBUG2(printk("%s(): Unrecognized code type %x at " + "pcids %x.\n", __func__, code_type, pcids)); + break; + } + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dcode = mbuf; + + qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); + for (i = 0; i < 4; i++) + dcode[i] = be32_to_cpu(dcode[i]); + + if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && + dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || + (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && + dcode[3] == 0)) { + DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", + __func__, FA_RISC_CODE_ADDR)); + } else { + ha->fw_revision[0] = dcode[0]; + ha->fw_revision[1] = dcode[1]; + ha->fw_revision[2] = dcode[2]; + ha->fw_revision[3] = dcode[3]; + } + + return ret; +} diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 24cffd98ee63..f33e2eb9f1b9 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -673,27 +673,6 @@ void __scsi_done(struct scsi_cmnd *cmd) } /* - * Function: scsi_retry_command - * - * Purpose: Send a command back to the low level to be retried. - * - * Notes: This command is always executed in the context of the - * bottom half handler, or the error handler thread. Low - * level drivers should not become re-entrant as a result of - * this. - */ -int scsi_retry_command(struct scsi_cmnd *cmd) -{ - /* - * Zero the sense information from the last time we tried - * this command. 
- */ - memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); - - return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); -} - -/* * Function: scsi_finish_command * * Purpose: Pass command off to upper layer for finishing of I/O diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 30ee3d72c021..5adbbeedec38 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -51,10 +51,10 @@ #include "scsi_logging.h" #include "scsi_debug.h" -#define SCSI_DEBUG_VERSION "1.80" -static const char * scsi_debug_version_date = "20061018"; +#define SCSI_DEBUG_VERSION "1.81" +static const char * scsi_debug_version_date = "20070104"; -/* Additional Sense Code (ASC) used */ +/* Additional Sense Code (ASC) */ #define NO_ADDITIONAL_SENSE 0x0 #define LOGICAL_UNIT_NOT_READY 0x4 #define UNRECOVERED_READ_ERR 0x11 @@ -65,9 +65,13 @@ static const char * scsi_debug_version_date = "20061018"; #define INVALID_FIELD_IN_PARAM_LIST 0x26 #define POWERON_RESET 0x29 #define SAVING_PARAMS_UNSUP 0x39 +#define TRANSPORT_PROBLEM 0x4b #define THRESHOLD_EXCEEDED 0x5d #define LOW_POWER_COND_ON 0x5e +/* Additional Sense Code Qualifier (ASCQ) */ +#define ACK_NAK_TO 0x3 + #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ /* Default values for driver parameters */ @@ -95,15 +99,20 @@ static const char * scsi_debug_version_date = "20061018"; #define SCSI_DEBUG_OPT_MEDIUM_ERR 2 #define SCSI_DEBUG_OPT_TIMEOUT 4 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8 +#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 /* When "every_nth" > 0 then modulo "every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. + * - a TRANSPORT_ERROR is simulated on successful read and write + * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. * * When "every_nth" < 0 then after "- every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. + * - a TRANSPORT_ERROR is simulated on successful read and write + * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. * This will continue until some other action occurs (e.g. the user * writing a new value (other than -1 or 1) to every_nth via sysfs). 
*/ @@ -315,6 +324,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) int target = SCpnt->device->id; struct sdebug_dev_info * devip = NULL; int inj_recovered = 0; + int inj_transport = 0; int delay_override = 0; if (done == NULL) @@ -352,6 +362,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) return 0; /* ignore command causing timeout */ else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) inj_recovered = 1; /* to reads and writes below */ + else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) + inj_transport = 1; /* to reads and writes below */ } if (devip->wlun) { @@ -468,7 +480,11 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) mk_sense_buffer(devip, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); errsts = check_condition_result; - } + } else if (inj_transport && (0 == errsts)) { + mk_sense_buffer(devip, ABORTED_COMMAND, + TRANSPORT_PROBLEM, ACK_NAK_TO); + errsts = check_condition_result; + } break; case REPORT_LUNS: /* mandatory, ignore unit attention */ delay_override = 1; @@ -531,6 +547,9 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) delay_override = 1; errsts = check_readiness(SCpnt, 0, devip); break; + case WRITE_BUFFER: + errsts = check_readiness(SCpnt, 1, devip); + break; default: if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " @@ -954,7 +973,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, int alloc_len, n, ret; alloc_len = (cmd[3] << 8) + cmd[4]; - arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_KERNEL); + arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); + if (! arr) + return DID_REQUEUE << 16; if (devip->wlun) pq_pdt = 0x1e; /* present, wlun */ else if (scsi_debug_no_lun_0 && (0 == devip->lun)) @@ -1217,7 +1238,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) + cmd[9]); - arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_KERNEL); + arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); + if (! arr) + return DID_REQUEUE << 16; /* * EVPD page 0x88 states we have two ports, one * real and a fake port with no device connected. @@ -1996,6 +2019,8 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp) if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; devip = devInfoReg(sdp); + if (NULL == devip) + return 1; /* no resources, will be marked offline */ sdp->hostdata = devip; if (sdp->host->cmd_per_lun) scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, @@ -2044,7 +2069,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) } } if (NULL == open_devip) { /* try and make a new one */ - open_devip = kzalloc(sizeof(*open_devip),GFP_KERNEL); + open_devip = kzalloc(sizeof(*open_devip),GFP_ATOMIC); if (NULL == open_devip) { printk(KERN_ERR "%s: out of memory at line %d\n", __FUNCTION__, __LINE__); @@ -2388,7 +2413,7 @@ MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); -MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)"); +MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
(def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); @@ -2943,7 +2968,6 @@ static int sdebug_add_adapter(void) struct list_head *lh, *lh_sf; sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); - if (NULL == sdbg_host) { printk(KERN_ERR "%s: out of memory at line %d\n", __FUNCTION__, __LINE__); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 2ecb6ff42444..8e5011d13a18 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -672,8 +672,8 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd); * XXX: Long term this code should go away, but that needs an audit of * all LLDDs first. **/ -static int scsi_eh_get_sense(struct list_head *work_q, - struct list_head *done_q) +int scsi_eh_get_sense(struct list_head *work_q, + struct list_head *done_q) { struct scsi_cmnd *scmd, *next; int rtn; @@ -715,6 +715,7 @@ static int scsi_eh_get_sense(struct list_head *work_q, return list_empty(work_q); } +EXPORT_SYMBOL_GPL(scsi_eh_get_sense); /** * scsi_try_to_abort_cmd - Ask host to abort a running command. @@ -1411,9 +1412,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * @eh_done_q: list_head for processed commands. * **/ -static void scsi_eh_ready_devs(struct Scsi_Host *shost, - struct list_head *work_q, - struct list_head *done_q) +void scsi_eh_ready_devs(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q) { if (!scsi_eh_stu(shost, work_q, done_q)) if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) @@ -1421,6 +1422,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost, if (!scsi_eh_host_reset(work_q, done_q)) scsi_eh_offline_sdevs(work_q, done_q); } +EXPORT_SYMBOL_GPL(scsi_eh_ready_devs); /** * scsi_eh_flush_done_q - finish processed commands or retry them. 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f02f48a882a9..503f09c2f05f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1400,7 +1400,7 @@ static void scsi_softirq_done(struct request *rq) scsi_finish_command(cmd); break; case NEEDS_RETRY: - scsi_retry_command(cmd); + scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); break; case ADD_TO_MLQUEUE: scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index f458c2f686d2..ee8efe849bf4 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -28,7 +28,6 @@ extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); extern int scsi_setup_command_freelist(struct Scsi_Host *shost); extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); extern void __scsi_done(struct scsi_cmnd *cmd); -extern int scsi_retry_command(struct scsi_cmnd *cmd); #ifdef CONFIG_SCSI_LOGGING void scsi_log_send(struct scsi_cmnd *cmd); void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); @@ -58,6 +57,11 @@ extern int scsi_error_handler(void *host); extern int scsi_decide_disposition(struct scsi_cmnd *cmd); extern void scsi_eh_wakeup(struct Scsi_Host *shost); extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); +void scsi_eh_ready_devs(struct Scsi_Host *shost, + struct list_head *work_q, + struct list_head *done_q); +int scsi_eh_get_sense(struct list_head *work_q, + struct list_head *done_q); /* scsi_lib.c */ extern int scsi_maybe_unblock_host(struct scsi_device *sdev); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index b83d03c4deef..8160c00d1092 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1029,7 +1029,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, sdev_printk(KERN_INFO, sdev, "scsi scan: consider passing scsi_mod." 
- "dev_flags=%s:%s:0x240 or 0x800240\n", + "dev_flags=%s:%s:0x240 or 0x1000240\n", scsi_inq_str(vend, result, 8, 16), scsi_inq_str(mod, result, 16, 32)); }); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 5c0b75bbfa10..6d39150e205b 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -336,6 +336,51 @@ show_sas_device_type(struct class_device *cdev, char *buf) } static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL); +static ssize_t do_sas_phy_enable(struct class_device *cdev, + size_t count, int enable) +{ + struct sas_phy *phy = transport_class_to_phy(cdev); + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_internal *i = to_sas_internal(shost->transportt); + int error; + + error = i->f->phy_enable(phy, enable); + if (error) + return error; + phy->enabled = enable; + return count; +}; + +static ssize_t store_sas_phy_enable(struct class_device *cdev, + const char *buf, size_t count) +{ + if (count < 1) + return -EINVAL; + + switch (buf[0]) { + case '0': + do_sas_phy_enable(cdev, count, 0); + break; + case '1': + do_sas_phy_enable(cdev, count, 1); + break; + default: + return -EINVAL; + } + + return count; +} + +static ssize_t show_sas_phy_enable(struct class_device *cdev, char *buf) +{ + struct sas_phy *phy = transport_class_to_phy(cdev); + + return snprintf(buf, 20, "%d", phy->enabled); +} + +static CLASS_DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable, + store_sas_phy_enable); + static ssize_t do_sas_phy_reset(struct class_device *cdev, size_t count, int hard_reset) { @@ -435,6 +480,7 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number) return NULL; phy->number = number; + phy->enabled = 1; device_initialize(&phy->dev); phy->dev.parent = get_device(parent); @@ -579,8 +625,19 @@ static void sas_port_release(struct device *dev) static void sas_port_create_link(struct sas_port *port, struct sas_phy *phy) { - sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, phy->dev.bus_id); - sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); + int res; + + res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, + phy->dev.bus_id); + if (res) + goto err; + res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); + if (res) + goto err; + return; +err: + printk(KERN_ERR "%s: Cannot create port links, err=%d\n", + __FUNCTION__, res); } static void sas_port_delete_link(struct sas_port *port, @@ -818,13 +875,20 @@ EXPORT_SYMBOL(sas_port_delete_phy); void sas_port_mark_backlink(struct sas_port *port) { + int res; struct device *parent = port->dev.parent->parent->parent; if (port->is_backlink) return; port->is_backlink = 1; - sysfs_create_link(&port->dev.kobj, &parent->kobj, - parent->bus_id); + res = sysfs_create_link(&port->dev.kobj, &parent->kobj, + parent->bus_id); + if (res) + goto err; + return; +err: + printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", + __FUNCTION__, res); } EXPORT_SYMBOL(sas_port_mark_backlink); @@ -1237,7 +1301,7 @@ int sas_rphy_add(struct sas_rphy *rphy) if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { scsi_scan_target(&rphy->dev, 0, - rphy->scsi_target_id, ~0, 0); + rphy->scsi_target_id, SCAN_WILD_CARD, 0); } return 0; @@ -1253,7 +1317,7 @@ EXPORT_SYMBOL(sas_rphy_add); * Note: * This function must only be called on a remote * PHY that has not sucessfully been added using - * sas_rphy_add(). 
+ * sas_rphy_add() (or has been sas_rphy_remove()'d) */ void sas_rphy_free(struct sas_rphy *rphy) { @@ -1272,18 +1336,30 @@ void sas_rphy_free(struct sas_rphy *rphy) EXPORT_SYMBOL(sas_rphy_free); /** - * sas_rphy_delete -- remove SAS remote PHY - * @rphy: SAS remote PHY to remove + * sas_rphy_delete -- remove and free SAS remote PHY + * @rphy: SAS remote PHY to remove and free * - * Removes the specified SAS remote PHY. + * Removes the specified SAS remote PHY and frees it. */ void sas_rphy_delete(struct sas_rphy *rphy) { + sas_rphy_remove(rphy); + sas_rphy_free(rphy); +} +EXPORT_SYMBOL(sas_rphy_delete); + +/** + * sas_rphy_remove -- remove SAS remote PHY + * @rphy: SAS remote phy to remove + * + * Removes the specified SAS remote PHY. + */ +void +sas_rphy_remove(struct sas_rphy *rphy) +{ struct device *dev = &rphy->dev; struct sas_port *parent = dev_to_sas_port(dev->parent); - struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); - struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); switch (rphy->identify.device_type) { case SAS_END_DEVICE: @@ -1299,17 +1375,10 @@ sas_rphy_delete(struct sas_rphy *rphy) transport_remove_device(dev); device_del(dev); - transport_destroy_device(dev); - - mutex_lock(&sas_host->lock); - list_del(&rphy->list); - mutex_unlock(&sas_host->lock); parent->rphy = NULL; - - put_device(dev); } -EXPORT_SYMBOL(sas_rphy_delete); +EXPORT_SYMBOL(sas_rphy_remove); /** * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY @@ -1389,6 +1458,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ !i->f->set_phy_speed, S_IRUGO) +#define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \ + SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ + !i->f->func, S_IRUGO) + #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) @@ -1396,10 +1469,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) #define SETUP_PHY_ATTRIBUTE_WRONLY(field) \ - SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1) + SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1) #define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \ - SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func) + SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func) #define SETUP_END_DEV_ATTRIBUTE(field) \ SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1) @@ -1479,6 +1552,7 @@ sas_attach_transport(struct sas_function_template *ft) SETUP_PHY_ATTRIBUTE(phy_reset_problem_count); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset); + SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable); i->phy_attrs[count] = NULL; count = 0; @@ -1587,7 +1661,7 @@ static void __exit sas_transport_exit(void) } MODULE_AUTHOR("Christoph Hellwig"); -MODULE_DESCRIPTION("SAS Transphy Attributes"); +MODULE_DESCRIPTION("SAS Transport Attributes"); MODULE_LICENSE("GPL"); module_init(sas_transport_init); diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c index 551baccec523..018c65f73ac4 100644 --- a/drivers/scsi/sim710.c +++ b/drivers/scsi/sim710.c @@ -123,6 +123,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr, hostdata->differential = differential; hostdata->clock = clock; hostdata->chip710 = 1; + hostdata->burst_length = 8; /* and register the chip */ if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c 
new file mode 100644 index 000000000000..6bc505115841 --- /dev/null +++ b/drivers/scsi/sni_53c710.c @@ -0,0 +1,159 @@ +/* -*- mode: c; c-basic-offset: 8 -*- */ + +/* SNI RM driver + * + * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com +**----------------------------------------------------------------------------- +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +** +**----------------------------------------------------------------------------- + */ + +/* + * Based on lasi700.c + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stat.h> +#include <linux/mm.h> +#include <linux/blkdev.h> +#include <linux/sched.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/delay.h> + +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_spi.h> + +#include "53c700.h" + +MODULE_AUTHOR("Thomas Bogendörfer"); +MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver"); +MODULE_LICENSE("GPL"); + +#define SNIRM710_CLOCK 32 + +static struct scsi_host_template snirm710_template = { + .name = "SNI RM SCSI 53c710", + .proc_name = "snirm_53c710", + .this_id = 7, + .module = THIS_MODULE, +}; + +static int __init snirm710_probe(struct platform_device *dev) +{ + unsigned long base; + struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; + struct resource *res; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + base = res->start; + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) { + printk(KERN_ERR "%s: Failed to allocate host data\n", + dev->dev.bus_id); + return -ENOMEM; + } + + hostdata->dev = &dev->dev; + dma_set_mask(&dev->dev, DMA_32BIT_MASK); + hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100); + hostdata->differential = 0; + + hostdata->clock = SNIRM710_CLOCK; + hostdata->force_le_on_be = 1; + hostdata->chip710 = 1; + hostdata->burst_length = 4; + + host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev); + if (!host) + goto out_kfree; + host->this_id = 7; + host->base = base; + host->irq = platform_get_irq(dev, 0); + if(request_irq(host->irq, NCR_700_intr, SA_SHIRQ, "snirm710", host)) { + printk(KERN_ERR "snirm710: request_irq failed!\n"); + goto out_put_host; + } + + dev_set_drvdata(&dev->dev, host); + scsi_scan_host(host); + + return 0; + + out_put_host: + scsi_host_put(host); + out_kfree: + iounmap(hostdata->base); + kfree(hostdata); + return -ENODEV; +} + +static int __exit snirm710_driver_remove(struct platform_device *dev) +{ + struct Scsi_Host *host = dev_get_drvdata(&dev->dev); + struct NCR_700_Host_Parameters *hostdata = + (struct NCR_700_Host_Parameters *)host->hostdata[0]; + + 
scsi_remove_host(host); + NCR_700_release(host); + free_irq(host->irq, host); + iounmap(hostdata->base); + kfree(hostdata); + + return 0; +} + +static struct platform_driver snirm710_driver = { + .probe = snirm710_probe, + .remove = __devexit_p(snirm710_driver_remove), + .driver = { + .name = "snirm_53c710", + }, +}; + +static int __init snirm710_init(void) +{ + int err; + + if ((err = platform_driver_register(&snirm710_driver))) { + printk(KERN_ERR "Driver registration failed\n"); + return err; + } + return 0; +} + +static void __exit snirm710_exit(void) +{ + platform_driver_unregister(&snirm710_driver); +} + +module_init(snirm710_init); +module_exit(snirm710_exit);
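
The scsi_error.c/scsi_priv.h hunks above un-static and export scsi_eh_get_sense() and scsi_eh_ready_devs() so that code outside scsi_error.c can drive the midlayer's recovery steps from its own error-handler strategy. The fragment below is only an illustrative sketch of that calling pattern, loosely modelled on the midlayer's own scsi_unjam_host(); the two helper signatures come from the hunks above, while the function name example_eh_strategy and the surrounding glue are assumptions for illustration, not part of the patch.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>	/* scsi_eh_flush_done_q() */
#include "scsi_priv.h"		/* scsi_eh_get_sense(), scsi_eh_ready_devs() per the hunk above */

/* Illustrative sketch only; names other than the exported helpers are assumed. */
static void example_eh_strategy(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	/* Take over the commands the midlayer has queued for recovery. */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Collect sense data first; escalate only commands still unresolved. */
	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	/* Retry or complete everything that recovery has finished with. */
	scsi_eh_flush_done_q(&eh_done_q);
}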