| author | David S. Miller <davem@davemloft.net> | 2012-04-10 14:30:45 -0400 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2012-04-10 14:30:45 -0400 |
| commit | 06eb4eafbdc0796d741d139a44f1253278da8611 (patch) | |
| tree | fbdb44317130c371928154c9e6903e699fe2b995 /drivers/scsi | |
| parent | 32ed53b83ea5ec26a4dba90e18f5e0ff6c71eb48 (diff) | |
| parent | f68e556e23d1a4176b563bcb25d8baf2c5313f91 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/scsi')
98 files changed, 4264 insertions, 1010 deletions
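
A large share of the bfa driver churn in this pull converts IOC/IOCFC configuration handling from the old `action`/`cfgdone` flags to an explicit state machine: `bfa_fsm_state_decl()` declares per-state handler/entry pairs, `bfa_fsm_set_state()` switches states (running the new state's entry function), and `bfa_fsm_send_event()` dispatches events such as `IOCFC_E_IOC_ENABLED` into the current handler. The sketch below is a minimal, standalone illustration of that entry-function-plus-event-dispatch pattern, not the driver's implementation — the real macros, `struct bfa_iocfc_s`, and the full event list live in the bfa headers, and the states here are trimmed to three for brevity.

```c
/*
 * Minimal sketch of the entry-function + event-dispatch pattern used by
 * the new IOCFC state machine in this series.  Standalone illustration
 * only: the real driver uses the bfa_fsm_* macros and struct bfa_iocfc_s.
 */
#include <stdio.h>

enum iocfc_event {                /* trimmed subset of the real event list */
	IOCFC_E_INIT = 1,
	IOCFC_E_IOC_ENABLED,
	IOCFC_E_IOC_FAILED,
};

struct iocfc;
typedef void (*iocfc_state_fn)(struct iocfc *fsm, enum iocfc_event event);

struct iocfc {
	iocfc_state_fn state;          /* current state handler ("bfa_fsm_t fsm") */
};

static void iocfc_sm_stopped(struct iocfc *fsm, enum iocfc_event event);
static void iocfc_sm_initing(struct iocfc *fsm, enum iocfc_event event);
static void iocfc_sm_operational(struct iocfc *fsm, enum iocfc_event event);

static void iocfc_sm_initing_entry(struct iocfc *fsm)
{
	(void)fsm;
	printf("entry: enable the IOC\n");   /* stands in for bfa_ioc_enable() */
}

/* mirrors bfa_fsm_set_state(): switch the handler, run the entry action */
static void set_state(struct iocfc *fsm, iocfc_state_fn next)
{
	fsm->state = next;
	if (next == iocfc_sm_initing)
		iocfc_sm_initing_entry(fsm); /* real macros call <state>_entry() */
}

/* mirrors bfa_fsm_send_event(): hand the event to the current state */
static void send_event(struct iocfc *fsm, enum iocfc_event event)
{
	fsm->state(fsm, event);
}

static void iocfc_sm_stopped(struct iocfc *fsm, enum iocfc_event event)
{
	switch (event) {
	case IOCFC_E_INIT:
		set_state(fsm, iocfc_sm_initing);
		break;
	default:
		break;                       /* driver would bfa_sm_fault() here */
	}
}

static void iocfc_sm_initing(struct iocfc *fsm, enum iocfc_event event)
{
	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		set_state(fsm, iocfc_sm_operational);
		break;
	case IOCFC_E_IOC_FAILED:
		set_state(fsm, iocfc_sm_stopped);
		break;
	default:
		break;
	}
}

static void iocfc_sm_operational(struct iocfc *fsm, enum iocfc_event event)
{
	(void)fsm;
	printf("operational: ignoring event %d\n", event);
}

int main(void)
{
	struct iocfc fsm = { .state = iocfc_sm_stopped };

	send_event(&fsm, IOCFC_E_INIT);        /* stopped -> initing (entry runs) */
	send_event(&fsm, IOCFC_E_IOC_ENABLED); /* initing -> operational */
	send_event(&fsm, IOCFC_E_IOC_FAILED);  /* ignored while operational */
	return 0;
}
```

With this structure, callbacks such as `bfa_iocfc_enable_cbfn()` in the patch simply translate IOC notifications into FSM events instead of inspecting ad-hoc flags, which is what allows the series to delete `cfgdone`, `action`, and the scattered completion bookkeeping seen in the removed hunks below.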
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index f672491774eb..a3adfb4357f5 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -129,7 +129,6 @@ #include <linux/interrupt.h> #include <linux/device.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/byteorder.h> diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index f66c33b9ab41..d4da3708763b 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -47,7 +47,6 @@ #include <asm/dma.h> #include <asm/io.h> -#include <asm/system.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index a06e608789e3..29684c8142b0 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -619,6 +619,7 @@ config SCSI_ARCMSR source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/mpt2sas/Kconfig" +source "drivers/scsi/ufs/Kconfig" config SCSI_HPTIOP tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index ad24e065b1e5..8deedeaf5608 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/ +obj-$(CONFIG_SCSI_UFSHCD) += ufs/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o obj-$(CONFIG_SCSI_GDTH) += gdth.o diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index bfd618a69499..374c4edf4fcb 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -41,7 +41,6 @@ #include <linux/firmware.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index f17c92cf808b..19a36945e6fd 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -239,7 +239,6 @@ #include <asm/irq.h> #include <linux/io.h> #include <linux/blkdev.h> -#include <asm/system.h> #include <linux/completion.h> #include <linux/errno.h> #include <linux/string.h> diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index ed119cedaae0..ede91f378000 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -42,7 +42,6 @@ #include <linux/slab.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include "scsi.h" diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 1c10b796c1a2..a3e6ed353917 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -53,7 +53,6 @@ #include <linux/gfp.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include "scsi.h" diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 7d48700257a7..9328121804bb 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -341,10 +341,10 @@ MODULE_PARM_DESC(aic79xx, " (0/256ms,1/128ms,2/64ms,3/32ms)\n" " slowcrc Turn on the SLOWCRC bit (Rev B only)\n" "\n" -" Sample /etc/modprobe.conf line:\n" -" Enable verbose logging\n" -" Set tag depth on Controller 2/Target 2 to 10 tags\n" -" Shorten the selection timeout to 128ms\n" +" Sample modprobe configuration file:\n" +" # Enable verbose logging\n" +" # Set tag depth on Controller 2/Target 2 to 10 tags\n" +" # Shorten the selection timeout to 128ms\n" "\n" " options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" ); diff --git 
a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index c6251bb4f438..5a477cdc780d 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -360,10 +360,10 @@ MODULE_PARM_DESC(aic7xxx, " seltime:<int> Selection Timeout\n" " (0/256ms,1/128ms,2/64ms,3/32ms)\n" "\n" -" Sample /etc/modprobe.conf line:\n" -" Toggle EISA/VLB probing\n" -" Set tag depth on Controller 1/Target 1 to 10 tags\n" -" Shorten the selection timeout to 128ms\n" +" Sample modprobe configuration file:\n" +" # Toggle EISA/VLB probing\n" +" # Set tag depth on Controller 1/Target 1 to 10 tags\n" +" # Shorten the selection timeout to 128ms\n" "\n" " options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n" ); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 2fe9e90e53d9..cbde1dca45ad 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -61,7 +61,6 @@ #include <linux/aer.h> #include <asm/dma.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index c454e44cf51c..b330438ac662 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -138,7 +138,6 @@ #include <linux/stringify.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/ecard.h> #include "../scsi.h" diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c index a750aa72b8ef..2a28b4ad1975 100644 --- a/drivers/scsi/arm/arxescsi.c +++ b/drivers/scsi/arm/arxescsi.c @@ -305,7 +305,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id) info->base = base; info->info.scsi.io_base = base + 0x2000; - info->info.scsi.irq = NO_IRQ; + info->info.scsi.irq = 0; info->info.scsi.dma = NO_DMA; info->info.scsi.io_shift = 5; info->info.ifcfg.clockrate = 24; /* MHz */ diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index a3398fe70a9c..c3b99c93637a 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -12,7 +12,6 @@ #include <asm/ecard.h> #include <asm/io.h> -#include <asm/system.h> #include "../scsi.h" #include <scsi/scsi_host.h> diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index e85c40b6e19b..6206a666a8ec 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2176,7 +2176,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result) fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; fn(info, SCpnt, result); - if (info->scsi.irq != NO_IRQ) { + if (info->scsi.irq) { spin_lock_irqsave(&info->host_lock, flags); if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); @@ -2276,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt, * We should only be using this if we don't have an interrupt. * Provide some "incentive" to use the queueing code. 
*/ - BUG_ON(info->scsi.irq != NO_IRQ); + BUG_ON(info->scsi.irq); info->internal_done = 0; fas216_queue_command_lck(SCpnt, fas216_internal_done); diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index 84b7127c0121..df2e1b3ddfe2 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h @@ -12,10 +12,6 @@ #ifndef FAS216_H #define FAS216_H -#ifndef NO_IRQ -#define NO_IRQ 255 -#endif - #include <scsi/scsi_eh.h> #include "queue.h" diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 849cdf89f7bb..d25f944b59c2 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -13,7 +13,6 @@ #include <asm/ecard.h> #include <asm/io.h> -#include <asm/system.h> #include "../scsi.h" #include <scsi/scsi_host.h> diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 7e6eca4a125e..68ce08552f69 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -30,7 +30,6 @@ #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/slab.h> -#include <asm/system.h> #include <asm/io.h> #include <scsi/scsi.h> @@ -2583,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * this than via the PCI device table */ if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { - error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); + atpdev->chip_ver = pdev->revision; if (atpdev->chip_ver < 2) goto err_eio; } @@ -2602,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) base_io &= 0xfffffff8; if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { - error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); + atpdev->chip_ver = pdev->revision; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 host_id = inb(base_io + 0x39); diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index a796de935054..4ad7e368bbc2 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -225,9 +225,9 @@ struct bfa_faa_args_s { }; struct bfa_iocfc_s { + bfa_fsm_t fsm; struct bfa_s *bfa; struct bfa_iocfc_cfg_s cfg; - int action; u32 req_cq_pi[BFI_IOC_MAX_CQS]; u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; u8 hw_qid[BFI_IOC_MAX_CQS]; @@ -236,7 +236,9 @@ struct bfa_iocfc_s { struct bfa_cb_qe_s dis_hcb_qe; struct bfa_cb_qe_s en_hcb_qe; struct bfa_cb_qe_s stats_hcb_qe; - bfa_boolean_t cfgdone; + bfa_boolean_t submod_enabled; + bfa_boolean_t cb_reqd; /* Driver call back reqd */ + bfa_status_t op_status; /* Status of bfa iocfc op */ struct bfa_dma_s cfg_info; struct bfi_iocfc_cfg_s *cfginfo; @@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end); void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); -wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); -wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport); @@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, void bfa_iocfc_enable(struct bfa_s *bfa); void bfa_iocfc_disable(struct bfa_s *bfa); -void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status); #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 4bd546bcc240..456e5762977d 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ 
-200,13 +200,431 @@ enum { #define DEF_CFG_NUM_SBOOT_LUNS 16 /* + * IOCFC state machine definitions/declarations + */ +bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_cfg_done, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, operational, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, dconf_write, + struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event); +bfa_fsm_state_decl(bfa_iocfc, init_failed, + struct bfa_iocfc_s, enum iocfc_event); + +/* * forward declaration for IOC FC functions */ +static void bfa_iocfc_start_submod(struct bfa_s *bfa); +static void bfa_iocfc_disable_submod(struct bfa_s *bfa); +static void bfa_iocfc_send_cfg(void *bfa_arg); static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); static void bfa_iocfc_disable_cbfn(void *bfa_arg); static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); static void bfa_iocfc_reset_cbfn(void *bfa_arg); static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; +static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete); +static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl); +static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl); +static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl); + +static void +bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc) +{ +} + +static void +bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_INIT: + case IOCFC_E_ENABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_enable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_dconf_modinit(iocfc->bfa); +} + +static void +bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_DCONF_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_iocfc_send_cfg(iocfc->bfa); +} + +static void 
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_CFG_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc) +{ + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_START: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); + break; + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_fcport_init(iocfc->bfa); + bfa_iocfc_start_submod(iocfc->bfa); +} + +static void +bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_dconf_modexit(iocfc->bfa); +} + +static void +bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_DCONF_DONE: + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_disable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_DISABLED: + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, + bfa_iocfc_stop_cb, iocfc->bfa); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_enable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static 
void +bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_iocfc_send_cfg(iocfc->bfa); +} + +static void +bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_CFG_DONE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + case IOCFC_E_IOC_FAILED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); + if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) + break; + + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, + bfa_iocfc_enable_cb, iocfc->bfa); + iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_ioc_disable(&iocfc->bfa->ioc); +} + +static void +bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_IOC_DISABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, + bfa_iocfc_disable_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_ENABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling); + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + bfa_iocfc_disable_submod(iocfc->bfa); +} + +static void +bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); + break; + case IOCFC_E_DISABLE: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); + break; + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); + break; + case IOCFC_E_IOC_FAILED: + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} + +static void +bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc) +{ + bfa_isr_disable(iocfc->bfa); + iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, iocfc->bfa); +} + +static void +bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) +{ + bfa_trc(iocfc->bfa, event); + + switch (event) { + case IOCFC_E_STOP: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); + break; + case IOCFC_E_DISABLE: + bfa_ioc_disable(&iocfc->bfa->ioc); + break; + case IOCFC_E_IOC_ENABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); + break; + case IOCFC_E_IOC_DISABLED: + bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); + iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; + bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, + bfa_iocfc_disable_cb, iocfc->bfa); + break; + case 
IOCFC_E_IOC_FAILED: + break; + default: + bfa_sm_fault(iocfc->bfa, event); + break; + } +} /* * BFA Interrupt handling functions @@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid) } } -static inline void +bfa_boolean_t bfa_isr_rspq(struct bfa_s *bfa, int qid) { struct bfi_msg_s *m; u32 pi, ci; struct list_head *waitq; + bfa_boolean_t ret; ci = bfa_rspq_ci(bfa, qid); pi = bfa_rspq_pi(bfa, qid); + ret = (ci != pi); + while (ci != pi) { m = bfa_rspq_elem(bfa, qid, ci); WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); @@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid) waitq = bfa_reqq(bfa, qid); if (!list_empty(waitq)) bfa_reqq_resume(bfa, qid); + + return ret; } static inline void @@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa) { u32 intr, qintr; int queue; + bfa_boolean_t rspq_comp = BFA_FALSE; intr = readl(bfa->iocfc.bfa_regs.intr_status); @@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa) */ if (bfa->queue_process) { for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) - bfa_isr_rspq(bfa, queue); + if (bfa_isr_rspq(bfa, queue)) + rspq_comp = BFA_TRUE; } if (!intr) - return BFA_TRUE; + return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE; /* * CPE completion queue interrupt @@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg) * Enable interrupt coalescing if it is driver init path * and not ioc disable/enable path. */ - if (!iocfc->cfgdone) + if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait)) cfg_info->intr_attr.coalesce = BFA_TRUE; - iocfc->cfgdone = BFA_FALSE; - /* * dma map IOC configuration itself */ @@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa->bfad = bfad; iocfc->bfa = bfa; - iocfc->action = BFA_IOCFC_ACT_NONE; - iocfc->cfg = *cfg; /* @@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa) for (i = 0; hal_mods[i]; i++) hal_mods[i]->start(bfa); + + bfa->iocfc.submod_enabled = BFA_TRUE; } /* @@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa) { int i; + if (bfa->iocfc.submod_enabled == BFA_FALSE) + return; + for (i = 0; hal_mods[i]; i++) hal_mods[i]->iocdisable(bfa); + + bfa->iocfc.submod_enabled = BFA_FALSE; } static void @@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) { struct bfa_s *bfa = bfa_arg; - if (complete) { - if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone) - bfa_cb_init(bfa->bfad, BFA_STATUS_OK); - else - bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); - } else { - if (bfa->iocfc.cfgdone) - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; - } + if (complete) + bfa_cb_init(bfa->bfad, bfa->iocfc.op_status); } static void @@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) if (compl) complete(&bfad->comp); - else - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; } static void @@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa) fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); - iocfc->cfgdone = BFA_TRUE; - /* * configure queue register offsets as learnt from firmware */ @@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa) */ bfa_msix_queue_install(bfa); - /* - * Configuration is complete - initialize/start submodules - */ - bfa_fcport_init(bfa); - - if (iocfc->action == BFA_IOCFC_ACT_INIT) { - if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) - bfa_cb_queue(bfa, &iocfc->init_hcb_qe, - bfa_iocfc_init_cb, bfa); - } else { - if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) - bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, - bfa_iocfc_enable_cb, bfa); - bfa_iocfc_start_submod(bfa); + if 
(bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) { + bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn; + bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn; + bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); } } + void bfa_iocfc_reset_queues(struct bfa_s *bfa) { @@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa) } } +/* + * Process FAA pwwn msg from fw. + */ +static void +bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn; + cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn; + + bfa->ioc.attr->pwwn = msg->pwwn; + bfa->ioc.attr->nwwn = msg->nwwn; + bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); +} + /* Fabric Assigned Address specific functions */ /* @@ -855,84 +1280,13 @@ bfa_faa_validate_request(struct bfa_s *bfa) if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) return BFA_STATUS_FEATURE_NOT_SUPPORTED; } else { - if (!bfa_ioc_is_acq_addr(&bfa->ioc)) - return BFA_STATUS_IOC_NON_OP; + return BFA_STATUS_IOC_NON_OP; } return BFA_STATUS_OK; } bfa_status_t -bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg) -{ - struct bfi_faa_en_dis_s faa_enable_req; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - bfa_status_t status; - - iocfc->faa_args.faa_cb.faa_cbfn = cbfn; - iocfc->faa_args.faa_cb.faa_cbarg = cbarg; - - status = bfa_faa_validate_request(bfa); - if (status != BFA_STATUS_OK) - return status; - - if (iocfc->faa_args.busy == BFA_TRUE) - return BFA_STATUS_DEVBUSY; - - if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED) - return BFA_STATUS_FAA_ENABLED; - - if (bfa_fcport_is_trunk_enabled(bfa)) - return BFA_STATUS_ERROR_TRUNK_ENABLED; - - bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED); - iocfc->faa_args.busy = BFA_TRUE; - - memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s)); - bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC, - BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa)); - - bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req, - sizeof(struct bfi_faa_en_dis_s)); - - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, - void *cbarg) -{ - struct bfi_faa_en_dis_s faa_disable_req; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - bfa_status_t status; - - iocfc->faa_args.faa_cb.faa_cbfn = cbfn; - iocfc->faa_args.faa_cb.faa_cbarg = cbarg; - - status = bfa_faa_validate_request(bfa); - if (status != BFA_STATUS_OK) - return status; - - if (iocfc->faa_args.busy == BFA_TRUE) - return BFA_STATUS_DEVBUSY; - - if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED) - return BFA_STATUS_FAA_DISABLED; - - bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED); - iocfc->faa_args.busy = BFA_TRUE; - - memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s)); - bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC, - BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa)); - - bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req, - sizeof(struct bfi_faa_en_dis_s)); - - return BFA_STATUS_OK; -} - -bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, bfa_cb_iocfc_t cbfn, void *cbarg) { @@ -963,38 +1317,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, } /* - * FAA enable response - */ -static void -bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc, - struct bfi_faa_en_dis_rsp_s *rsp) -{ - void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; - bfa_status_t status = rsp->status; - - WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); - - iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); - 
iocfc->faa_args.busy = BFA_FALSE; -} - -/* - * FAA disable response - */ -static void -bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc, - struct bfi_faa_en_dis_rsp_s *rsp) -{ - void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; - bfa_status_t status = rsp->status; - - WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); - - iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); - iocfc->faa_args.busy = BFA_FALSE; -} - -/* * FAA query response */ static void @@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) { struct bfa_s *bfa = bfa_arg; - if (status == BFA_STATUS_FAA_ACQ_ADDR) { - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, - bfa_iocfc_init_cb, bfa); - return; - } - - if (status != BFA_STATUS_OK) { - bfa_isr_disable(bfa); - if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, - bfa_iocfc_init_cb, bfa); - else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) - bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, - bfa_iocfc_enable_cb, bfa); - return; - } - - bfa_iocfc_send_cfg(bfa); - bfa_dconf_modinit(bfa); + if (status == BFA_STATUS_OK) + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED); + else + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); } /* @@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; - bfa_isr_disable(bfa); - bfa_iocfc_disable_submod(bfa); - - if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) - bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, - bfa); - else { - WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE); - bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, - bfa); - } + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED); } /* @@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg) struct bfa_s *bfa = bfa_arg; bfa->queue_process = BFA_FALSE; - - bfa_isr_disable(bfa); - bfa_iocfc_disable_submod(bfa); - - if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, - bfa); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); } /* @@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg) bfa_isr_enable(bfa); } - /* * Query IOC memory requirement information. 
*/ @@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, INIT_LIST_HEAD(&bfa->comp_q); for (i = 0; i < BFI_IOC_MAX_CQS; i++) INIT_LIST_HEAD(&bfa->reqq_waitq[i]); + + bfa->iocfc.cb_reqd = BFA_FALSE; + bfa->iocfc.op_status = BFA_STATUS_OK; + bfa->iocfc.submod_enabled = BFA_FALSE; + + bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped); } /* @@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, void bfa_iocfc_init(struct bfa_s *bfa) { - bfa->iocfc.action = BFA_IOCFC_ACT_INIT; - bfa_ioc_enable(&bfa->ioc); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT); } /* @@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa) void bfa_iocfc_start(struct bfa_s *bfa) { - if (bfa->iocfc.cfgdone) - bfa_iocfc_start_submod(bfa); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START); } /* @@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa) void bfa_iocfc_stop(struct bfa_s *bfa) { - bfa->iocfc.action = BFA_IOCFC_ACT_STOP; - bfa->queue_process = BFA_FALSE; - bfa_dconf_modexit(bfa); - if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) - bfa_ioc_disable(&bfa->ioc); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP); } void @@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) case BFI_IOCFC_I2H_UPDATEQ_RSP: iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); break; - case BFI_IOCFC_I2H_FAA_ENABLE_RSP: - bfa_faa_enable_reply(iocfc, - (struct bfi_faa_en_dis_rsp_s *)msg); - break; - case BFI_IOCFC_I2H_FAA_DISABLE_RSP: - bfa_faa_disable_reply(iocfc, - (struct bfi_faa_en_dis_rsp_s *)msg); + case BFI_IOCFC_I2H_ADDR_MSG: + bfa_iocfc_process_faa_addr(bfa, + (struct bfi_faa_addr_msg_s *)msg); break; case BFI_IOCFC_I2H_FAA_QUERY_RSP: bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); @@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); - bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE; - bfa_ioc_enable(&bfa->ioc); + bfa->iocfc.cb_reqd = BFA_TRUE; + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE); } void @@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); - bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; bfa->queue_process = BFA_FALSE; - bfa_ioc_disable(&bfa->ioc); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); } - bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa) { - return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; + return bfa_ioc_is_operational(&bfa->ioc) && + bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational); } /* @@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) } } -void -bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status) -{ - if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) { - if (bfa->iocfc.cfgdone == BFA_TRUE) - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, - bfa_iocfc_init_cb, bfa); - } -} - /* * Return the list of PCI vendor/device id lists supported by this * BFA instance. 
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index cb07c628b2f1..36756ce0e58f 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s { u16 num_uf_bufs; /* unsolicited recv buffers */ u8 num_cqs; u8 fw_tick_res; /* FW clock resolution in ms */ - u8 rsvd[2]; + u8 rsvd[6]; }; #pragma pack() diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index d4f951fe753e..5d2a1307e5ce 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) if (vport_drv->comp_del) complete(vport_drv->comp_del); + else + kfree(vport_drv); bfa_lps_delete(vport->lps); } diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 52628d5d3c9b..fe0463a1db04 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) * - MAX receive frame size */ rport->cisc = plogi->csp.cisc; - rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); + if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz)) + rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); + else + rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz); bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); bfa_trc(port->fcs, port->fabric->bb_credit); diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index eca7ab78085b..14e6284e48e4 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); static void bfa_ioc_recover(struct bfa_ioc_s *ioc); -static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , enum bfa_ioc_event_e event); static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); @@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc); static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); - /* * IOC state machine definitions/declarations */ @@ -114,7 +112,6 @@ enum ioc_event { IOC_E_HWERROR = 10, /* hardware error interrupt */ IOC_E_TIMEOUT = 11, /* timeout */ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ - IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */ }; bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); @@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event); static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, @@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, - {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR}, }; /* @@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) switch (event) { case IOC_E_FWRSP_GETATTR: bfa_ioc_timer_stop(ioc); - 
bfa_ioc_check_attr_wwns(ioc); - bfa_ioc_hb_monitor(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; - case IOC_E_FWRSP_ACQ_ADDR: - bfa_ioc_timer_stop(ioc); - bfa_ioc_hb_monitor(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr); - break; - case IOC_E_PFFAILED: case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); @@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) } } -/* - * Acquiring address from fabric (entry function) - */ -static void -bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc) -{ -} - -/* - * Acquiring address from the fabric - */ -static void -bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event) -{ - bfa_trc(ioc, event); - - switch (event) { - case IOC_E_FWRSP_GETATTR: - bfa_ioc_check_attr_wwns(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_op); - break; - - case IOC_E_PFFAILED: - case IOC_E_HWERROR: - bfa_hb_timer_stop(ioc); - case IOC_E_HBFAIL: - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); - if (event != IOC_E_PFFAILED) - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); - break; - - case IOC_E_DISABLE: - bfa_hb_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_ENABLE: - break; - - default: - bfa_sm_fault(ioc, event); - } -} - static void bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) { @@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); + bfa_ioc_hb_monitor(ioc); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); } @@ -738,26 +681,60 @@ static void bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) { struct bfi_ioc_image_hdr_s fwhdr; - u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); + u32 r32, fwstate, pgnum, pgoff, loff = 0; + int i; + + /* + * Spin on init semaphore to serialize. + */ + r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); + while (r32 & 0x1) { + udelay(20); + r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); + } /* h/w sem init */ - if (fwstate == BFI_IOC_UNINIT) + fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); + if (fwstate == BFI_IOC_UNINIT) { + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); goto sem_get; + } bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); - if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); goto sem_get; + } + + /* + * Clear fwver hdr + */ + pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); + pgoff = PSS_SMEM_PGOFF(loff); + writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) { + bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); + loff += sizeof(u32); + } bfa_trc(iocpf->ioc, fwstate); - bfa_trc(iocpf->ioc, fwhdr.exec); + bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate); /* - * Try to lock and then unlock the semaphore. + * Unlock the hw semaphore. Should be here only once per boot. */ readl(iocpf->ioc->ioc_regs.ioc_sem_reg); writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); + + /* + * unlock init semaphore. 
+ */ + writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); + sem_get: bfa_ioc_hw_sem_get(iocpf->ioc); } @@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 i; u32 asicmode; - /* - * Initialize LMEM first before code download - */ - bfa_ioc_lmem_init(ioc); - bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); @@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) bfa_ioc_pll_init_asic(ioc); ioc->pllinit = BFA_TRUE; + + /* + * Initialize LMEM + */ + bfa_ioc_lmem_init(ioc); + /* * release semaphore. */ @@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) bfa_ioc_getattr_reply(ioc); break; - case BFI_IOC_I2H_ACQ_ADDR_REPLY: - bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR); - break; - default: bfa_trc(ioc, msg->mh.msg_id); WARN_ON(1); @@ -2416,15 +2390,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) } /* - * Return TRUE if IOC is in acquiring address state - */ -bfa_boolean_t -bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc) -{ - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr); -} - -/* * return true if IOC firmware is different. */ bfa_boolean_t @@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc) bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } -static void -bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) -{ - if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) - return; - if (ioc->attr->nwwn == 0) - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); - if (ioc->attr->pwwn == 0) - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); -} - /* * BFA IOC PF private functions */ @@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, */ #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ -#define BFA_DIAG_FWPING_TOV 1000 /* msec */ +#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */ /* IOC event handler */ static void @@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) } static void -diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg) +diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg) { bfa_trc(diag, diag->ledtest.lock); diag->ledtest.lock = BFA_FALSE; @@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, u32 pattern, struct bfa_diag_memtest_result *result, bfa_cb_diag_t cbfn, void *cbarg) { + u32 memtest_tov; + bfa_trc(diag, pattern); if (!bfa_ioc_adapter_is_disabled(diag->ioc)) @@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, /* download memtest code and take LPU0 out of reset */ bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); + memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? 
+ CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV; bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, - bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV); + bfa_diag_memtest_done, diag, memtest_tov); diag->timer_active = 1; return BFA_STATUS_OK; } @@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) case BFA_DCONF_SM_INIT: if (dconf->min_cfg) { bfa_trc(dconf->bfa, dconf->min_cfg); + bfa_fsm_send_event(&dconf->bfa->iocfc, + IOCFC_E_DCONF_DONE); return; } bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); - dconf->flashdone = BFA_FALSE; - bfa_trc(dconf->bfa, dconf->flashdone); + bfa_timer_start(dconf->bfa, &dconf->timer, + bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), BFA_FLASH_PART_DRV, dconf->instance, dconf->dconf, sizeof(struct bfa_dconf_s), 0, bfa_dconf_init_cb, dconf->bfa); if (bfa_status != BFA_STATUS_OK) { + bfa_timer_stop(&dconf->timer); bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); return; } break; case BFA_DCONF_SM_EXIT: - dconf->flashdone = BFA_TRUE; + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_WR: case BFA_DCONF_SM_FLASH_COMP: @@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, switch (event) { case BFA_DCONF_SM_FLASH_COMP: + bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_ready); break; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_ready); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); break; case BFA_DCONF_SM_EXIT: - dconf->flashdone = BFA_TRUE; - bfa_trc(dconf->bfa, dconf->flashdone); + bfa_timer_stop(&dconf->timer); + bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); + break; case BFA_DCONF_SM_IOCDISABLE: + bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); break; default: @@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); break; case BFA_DCONF_SM_EXIT: - dconf->flashdone = BFA_TRUE; - bfa_trc(dconf->bfa, dconf->flashdone); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_INIT: case BFA_DCONF_SM_IOCDISABLE: @@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, bfa_timer_stop(&dconf->timer); case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); - dconf->flashdone = BFA_TRUE; - bfa_trc(dconf->bfa, dconf->flashdone); - bfa_ioc_disable(&dconf->bfa->ioc); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; default: bfa_sm_fault(dconf->bfa, event); @@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); break; case BFA_DCONF_SM_EXIT: - dconf->flashdone = BFA_TRUE; bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); + bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_IOCDISABLE: break; @@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, if (cfg->drvcfg.min_cfg) { bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); dconf->min_cfg = BFA_TRUE; - /* - * Set the flashdone flag to TRUE explicitly as no flash - * write will happen in min_cfg mode. 
- */ - dconf->flashdone = BFA_TRUE; } else { dconf->min_cfg = BFA_FALSE; bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); @@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) struct bfa_s *bfa = arg; struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); - dconf->flashdone = BFA_TRUE; - bfa_trc(bfa, dconf->flashdone); - bfa_iocfc_cb_dconf_modinit(bfa, status); + bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); if (status == BFA_STATUS_OK) { bfa_dconf_read_data_valid(bfa) = BFA_TRUE; if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) @@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) dconf->dconf->hdr.version = BFI_DCONF_VERSION; } - bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); + bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); } void @@ -5977,7 +5933,5 @@ void bfa_dconf_modexit(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); - BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE; - bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone); bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); } diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 546d46b37101..1a99d4b5b50f 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -373,6 +373,22 @@ struct bfa_cb_qe_s { }; /* + * IOCFC state machine definitions/declarations + */ +enum iocfc_event { + IOCFC_E_INIT = 1, /* IOCFC init request */ + IOCFC_E_START = 2, /* IOCFC mod start request */ + IOCFC_E_STOP = 3, /* IOCFC stop request */ + IOCFC_E_ENABLE = 4, /* IOCFC enable request */ + IOCFC_E_DISABLE = 5, /* IOCFC disable request */ + IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */ + IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */ + IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */ + IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */ + IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */ +}; + +/* * ASIC block configurtion related */ @@ -706,7 +722,6 @@ struct bfa_dconf_s { struct bfa_dconf_mod_s { bfa_sm_t sm; u8 instance; - bfa_boolean_t flashdone; bfa_boolean_t read_data_valid; bfa_boolean_t min_cfg; struct bfa_timer_s timer; diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index d1b8f0caaa79..2eb0c6a2938d 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb) } #define CT2_NFC_MAX_DELAY 1000 +#define CT2_NFC_VER_VALID 0x143 +#define BFA_IOC_PLL_POLL 1000000 + +static bfa_boolean_t +bfa_ioc_ct2_nfc_halted(void __iomem *rb) +{ + u32 r32; + + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + return BFA_TRUE; + + return BFA_FALSE; +} + +static void +bfa_ioc_ct2_nfc_resume(void __iomem *rb) +{ + u32 r32; + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (!(r32 & __NFC_CONTROLLER_HALTED)) + return; + udelay(1000); + } + WARN_ON(1); +} + bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) { - u32 wgn, r32; - int i; + u32 wgn, r32, nfc_ver, i; - /* - * Initialize PLL if not already done by NFC - */ wgn = readl(rb + CT2_WGN_STATUS); - if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && + (nfc_ver >= CT2_NFC_VER_VALID)) { + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + + writel(__RESET_AND_START_SCLK_LCLK_PLLS, + rb + 
CT2_CSI_FW_CTL_SET_REG); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) + break; + } + + WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) + break; + } + + WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + udelay(1000); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + } else { writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_CSR_SET_REG); @@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) break; udelay(1000); } - } - /* - * Mask the interrupts and clear any - * pending interrupts. - */ - writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); - writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); - - r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); - if (r32 == 1) { - writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); - readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + (rb + CT2_APP_PLL_LCLK_CTL_REG)); } - r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); - if (r32 == 1) { - writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); - readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); - } - - bfa_ioc_ct2_mac_reset(rb); - bfa_ioc_ct2_sclk_init(rb); - bfa_ioc_ct2_lclk_init(rb); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); - writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_SCLK_CTL_REG)); - - /* - * release soft reset on s_clk & l_clk - */ - r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); - writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, - (rb + CT2_APP_PLL_LCLK_CTL_REG)); /* * Announce flash device presence, if flash was corrupted. */ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { - r32 = readl((rb + PSS_GPIO_OUT_REG)); + r32 = readl(rb + PSS_GPIO_OUT_REG); writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); - r32 = readl((rb + PSS_GPIO_OE_REG)); + r32 = readl(rb + PSS_GPIO_OE_REG); writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); } + /* + * Mask the interrupts and clear any + * pending interrupts. 
+ */ + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); + writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); + + /* For first time initialization, no need to clear interrupts */ + r32 = readl(rb + HOST_SEM5_REG); + if (r32 & 0x1) { + r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); + if (r32 == 1) { + writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + if (r32 == 1) { + writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); + readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + } + } + bfa_ioc_ct2_mem_init(rb); - writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); - writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); + writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); + writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); + return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index aa8a0eaf91f9..2e856e6710f7 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_login); + bfa_lps_send_login(lps); break; case BFA_LPS_SM_OFFLINE: @@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) break; case BFA_STATUS_VPORT_MAX: - if (!rsp->ext_status) + if (rsp->ext_status) bfa_lps_no_res(lps, rsp->ext_status); break; @@ -3084,33 +3085,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) } static void -bfa_fcport_send_txcredit(void *port_cbarg) -{ - - struct bfa_fcport_s *fcport = port_cbarg; - struct bfi_fcport_set_svc_params_req_s *m; - - /* - * check for room in queue to send request now - */ - m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - if (!m) { - bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); - return; - } - - bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, - bfa_fn_lpu(fcport->bfa)); - m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); - m->bb_scn = fcport->cfg.bb_scn; - - /* - * queue I/O message to firmware - */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); -} - -static void bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, struct bfa_qos_stats_s *s) { @@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) return BFA_STATUS_UNSUPP_SPEED; } - /* For Mezz card, port speed entered needs to be checked */ - if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) { - if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { - /* For CT2, 1G is not supported */ - if ((speed == BFA_PORT_SPEED_1GBPS) && - (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) - return BFA_STATUS_UNSUPP_SPEED; + /* Port speed entered needs to be checked */ + if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { + /* For CT2, 1G is not supported */ + if ((speed == BFA_PORT_SPEED_1GBPS) && + (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) + return BFA_STATUS_UNSUPP_SPEED; - /* Already checked for Auto Speed and Max Speed supp */ - if (!(speed == BFA_PORT_SPEED_1GBPS || - speed == BFA_PORT_SPEED_2GBPS || - speed == BFA_PORT_SPEED_4GBPS || - speed == BFA_PORT_SPEED_8GBPS || - speed == BFA_PORT_SPEED_16GBPS || - speed == BFA_PORT_SPEED_AUTO)) - return BFA_STATUS_UNSUPP_SPEED; - } else { - if (speed != BFA_PORT_SPEED_10GBPS) - return BFA_STATUS_UNSUPP_SPEED; - } + /* Already checked for Auto Speed and Max Speed supp */ + if (!(speed == BFA_PORT_SPEED_1GBPS || + speed == BFA_PORT_SPEED_2GBPS || + speed == BFA_PORT_SPEED_4GBPS || + speed 
== BFA_PORT_SPEED_8GBPS || + speed == BFA_PORT_SPEED_16GBPS || + speed == BFA_PORT_SPEED_AUTO)) + return BFA_STATUS_UNSUPP_SPEED; + } else { + if (speed != BFA_PORT_SPEED_10GBPS) + return BFA_STATUS_UNSUPP_SPEED; } fcport->cfg.speed = speed; @@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn) fcport->cfg.bb_scn = bb_scn; if (bb_scn) fcport->bbsc_op_state = BFA_TRUE; - bfa_fcport_send_txcredit(fcport); } /* @@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) attr->port_state = BFA_PORT_ST_IOCDIS; else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) attr->port_state = BFA_PORT_ST_FWMISMATCH; - else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc)) - attr->port_state = BFA_PORT_ST_ACQ_ADDR; } /* FCoE vlan */ diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index b52cbb6bcd5a..f30067564639 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); void bfa_cb_lps_cvl_event(void *bfad, void *uarg); /* FAA specific APIs */ -bfa_status_t bfa_faa_enable(struct bfa_s *bfa, - bfa_cb_iocfc_t cbfn, void *cbarg); -bfa_status_t bfa_faa_disable(struct bfa_s *bfa, - bfa_cb_iocfc_t cbfn, void *cbarg); bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, bfa_cb_iocfc_t cbfn, void *cbarg); diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 1938fe0473e9..7b1ecd2b3ffe 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) return status; } +int +bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) +{ + struct bfad_im_port_s *im_port = + (struct bfad_im_port_s *) shost->hostdata[0]; + struct bfad_s *bfad = im_port->bfad; + struct bfad_hal_comp fcomp; + unsigned long flags; + uint32_t status; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + status = bfa_port_disable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (status != BFA_STATUS_OK) + return -EIO; + + wait_for_completion(&fcomp.comp); + if (fcomp.status != BFA_STATUS_OK) + return -EIO; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + status = bfa_port_enable(&bfad->bfa.modules.port, + bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (status != BFA_STATUS_OK) + return -EIO; + + wait_for_completion(&fcomp.comp); + if (fcomp.status != BFA_STATUS_OK) + return -EIO; + + return 0; +} + static int bfad_im_vport_delete(struct fc_vport *fc_vport) { @@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) unsigned long flags; struct completion fcomp; - if (im_port->flags & BFAD_PORT_DELETE) - goto free_scsi_host; + if (im_port->flags & BFAD_PORT_DELETE) { + bfad_scsi_host_free(bfad, im_port); + list_del(&vport->list_entry); + return 0; + } port = im_port->port; @@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) wait_for_completion(vport->comp_del); -free_scsi_host: bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); @@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = { .show_rport_dev_loss_tmo = 1, .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, - + .issue_fc_host_lip = bfad_im_issue_fc_host_lip, .vport_create = bfad_im_vport_create, .vport_delete = bfad_im_vport_delete, 
.vport_disable = bfad_im_vport_disable, diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 8005c6c5a080..e1f4b10df42a 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -1288,50 +1288,6 @@ out: } int -bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd) -{ - struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; - unsigned long flags; - struct bfad_hal_comp fcomp; - - init_completion(&fcomp.comp); - iocmd->status = BFA_STATUS_OK; - spin_lock_irqsave(&bfad->bfad_lock, flags); - iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - if (iocmd->status != BFA_STATUS_OK) - goto out; - - wait_for_completion(&fcomp.comp); - iocmd->status = fcomp.status; -out: - return 0; -} - -int -bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd) -{ - struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; - unsigned long flags; - struct bfad_hal_comp fcomp; - - init_completion(&fcomp.comp); - iocmd->status = BFA_STATUS_OK; - spin_lock_irqsave(&bfad->bfad_lock, flags); - iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - if (iocmd->status != BFA_STATUS_OK) - goto out; - - wait_for_completion(&fcomp.comp); - iocmd->status = fcomp.status; -out: - return 0; -} - -int bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; @@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; void *iocmd_bufptr; unsigned long flags; + u32 offset; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { @@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); spin_lock_irqsave(&bfad->bfad_lock, flags); + offset = iocmd->offset; iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, - (u32 *)&iocmd->offset, &iocmd->bufsz); + &offset, &iocmd->bufsz); + iocmd->offset = offset; spin_unlock_irqrestore(&bfad->bfad_lock, flags); out: return 0; @@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_FLASH_DISABLE_OPTROM: rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); break; - case IOCMD_FAA_ENABLE: - rc = bfad_iocmd_faa_enable(bfad, iocmd); - break; - case IOCMD_FAA_DISABLE: - rc = bfad_iocmd_faa_disable(bfad, iocmd); - break; case IOCMD_FAA_QUERY: rc = bfad_iocmd_faa_query(bfad, iocmd); break; @@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) job->shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; + struct request_queue *request_q = job->req->q; void *payload_kbuf; int rc = -EINVAL; + /* + * Set the BSG device request_queue size to 256 to support + * payloads larger than 512*1024K bytes. 
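Aside, not part of the patch: the blk_queue_max_segments() call this hunk adds works because a bsg vendor request is bounded by the queue's segment limit; with 256 scatter/gather segments of PAGE_SIZE each (4 KiB on most architectures) a single request can carry roughly 1 MiB. A minimal sketch of the same idea, assuming the 3.x-era block and FC-BSG interfaces this driver uses (struct fc_bsg_job carrying its request_queue in job->req->q):

#include <linux/blkdev.h>
#include <scsi/scsi_transport_fc.h>

/* Sketch: raise the per-request segment limit once, before copying in
 * the vendor payload, so large buffers are not rejected by the block
 * layer. */
static void example_bsg_allow_large_payload(struct fc_bsg_job *job)
{
	struct request_queue *q = job->req->q;

	/* 256 segments * 4 KiB pages ~= 1 MiB per vendor command */
	blk_queue_max_segments(q, 256);
}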
+ */ + blk_queue_max_segments(request_q, 256); + /* Allocate a temp buffer to hold the passed in user space command */ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!payload_kbuf) { diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index e859adb9aa9e..17ad67283130 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -83,8 +83,6 @@ enum { IOCMD_PORT_CFG_MODE, IOCMD_FLASH_ENABLE_OPTROM, IOCMD_FLASH_DISABLE_OPTROM, - IOCMD_FAA_ENABLE, - IOCMD_FAA_DISABLE, IOCMD_FAA_QUERY, IOCMD_CEE_GET_ATTR, IOCMD_CEE_GET_STATS, diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index dc5b9d99c450..7f74f1d19124 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -56,7 +56,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.0.2.2" +#define BFAD_DRIVER_VERSION "3.0.23.0" #endif #define BFAD_PROTO_NAME FCPI_NAME diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 0d9f1fb50db0..d4220e13cafa 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs { BFI_IOCFC_H2I_CFG_REQ = 1, BFI_IOCFC_H2I_SET_INTR_REQ = 2, BFI_IOCFC_H2I_UPDATEQ_REQ = 3, - BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4, - BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5, - BFI_IOCFC_H2I_FAA_QUERY_REQ = 6, + BFI_IOCFC_H2I_FAA_QUERY_REQ = 4, + BFI_IOCFC_H2I_ADDR_REQ = 5, }; enum bfi_iocfc_i2h_msgs { BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), - BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4), - BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5), - BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6), + BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4), + BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5), }; struct bfi_iocfc_cfg_s { @@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s { struct bfi_mhdr_s mh; /* common msg header */ }; +struct bfi_faa_addr_msg_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 rsvd[4]; + wwn_t pwwn; /* Fabric acquired PWWN */ + wwn_t nwwn; /* Fabric acquired PWWN */ +}; + /* * BFI_IOCFC_H2I_FAA_QUERY_REQ message */ diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h index d892064b64a8..ed5f159e1867 100644 --- a/drivers/scsi/bfa/bfi_reg.h +++ b/drivers/scsi/bfa/bfi_reg.h @@ -335,11 +335,17 @@ enum { #define __PMM_1T_PNDB_P 0x00000002 #define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c #define CT2_WGN_STATUS 0x00014990 +#define __A2T_AHB_LOAD 0x00000800 #define __WGN_READY 0x00000400 #define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_CSR_CLR_REG 0x00027420 #define CT2_NFC_CSR_SET_REG 0x00027424 #define __HALT_NFC_CONTROLLER 0x00000002 #define __NFC_CONTROLLER_HALTED 0x00001000 +#define CT2_RSC_GPR15_REG 0x0002765c +#define CT2_CSI_FW_CTL_REG 0x00027080 +#define CT2_CSI_FW_CTL_SET_REG 0x00027088 +#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000 #define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 #define __CSI_MAC_RESET 0x00000010 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index abd72a01856d..c1c6a92a0b98 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -439,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, fr->fr_dev = lport; bg = &bnx2fc_global; - spin_lock_bh(&bg->fcoe_rx_list.lock); + spin_lock(&bg->fcoe_rx_list.lock); __skb_queue_tail(&bg->fcoe_rx_list, skb); if (bg->fcoe_rx_list.qlen == 1) wake_up_process(bg->thread); - spin_unlock_bh(&bg->fcoe_rx_list.lock); + 
spin_unlock(&bg->fcoe_rx_list.lock); return 0; err: diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index c2677ba29c74..4b11bb04f5c4 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -72,7 +72,6 @@ #endif -#include <asm/system.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/blkdev.h> diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ae7d15c44e2a..335e85192807 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, goto err; fps = &per_cpu(fcoe_percpu, cpu); - spin_lock_bh(&fps->fcoe_rx_list.lock); + spin_lock(&fps->fcoe_rx_list.lock); if (unlikely(!fps->thread)) { /* * The targeted CPU is not ready, let's target @@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, "ready for incoming skb- using first online " "CPU.\n"); - spin_unlock_bh(&fps->fcoe_rx_list.lock); + spin_unlock(&fps->fcoe_rx_list.lock); cpu = cpumask_first(cpu_online_mask); fps = &per_cpu(fcoe_percpu, cpu); - spin_lock_bh(&fps->fcoe_rx_list.lock); + spin_lock(&fps->fcoe_rx_list.lock); if (!fps->thread) { - spin_unlock_bh(&fps->fcoe_rx_list.lock); + spin_unlock(&fps->fcoe_rx_list.lock); goto err; } } @@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, * so we're free to queue skbs into it's queue. */ - /* If this is a SCSI-FCP frame, and this is already executing on the - * correct CPU, and the queue for this CPU is empty, then go ahead - * and process the frame directly in the softirq context. - * This lets us process completions without context switching from the - * NET_RX softirq, to our receive processing thread, and then back to - * BLOCK softirq context. + /* + * Note: We used to have a set of conditions under which we would + * call fcoe_recv_frame directly, rather than queuing to the rx list + * as it could save a few cycles, but doing so is prohibited, as + * fcoe_recv_frame has several paths that may sleep, which is forbidden + * in softirq context. 
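Aside, not part of the patch: the comment above is the whole justification for the simplified receive path that follows; because fcoe_recv_frame() has paths that may sleep, every frame arriving in NET_RX softirq context must be deferred to the per-CPU receive kthread. A condensed sketch of the queue-and-wake step, assuming (as the hunk does) that the caller is already in softirq context, so the plain spin_lock() variant is enough and the thread is woken only when it is actually sleeping:

#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Sketch: hand a received FCoE skb to the per-CPU kthread instead of
 * processing it inline in softirq context. */
static void example_defer_to_rx_thread(struct sk_buff_head *rx_list,
				       struct task_struct *rx_thread,
				       struct sk_buff *skb)
{
	spin_lock(&rx_list->lock);		/* already in softirq, no _bh */
	__skb_queue_tail(rx_list, skb);
	if (rx_thread->state == TASK_INTERRUPTIBLE)
		wake_up_process(rx_thread);	/* wake only if it is sleeping */
	spin_unlock(&rx_list->lock);
}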
*/ - if (fh->fh_type == FC_TYPE_FCP && - cpu == smp_processor_id() && - skb_queue_empty(&fps->fcoe_rx_list)) { - spin_unlock_bh(&fps->fcoe_rx_list.lock); - fcoe_recv_frame(skb); - } else { - __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->fcoe_rx_list.qlen == 1) - wake_up_process(fps->thread); - spin_unlock_bh(&fps->fcoe_rx_list.lock); - } + __skb_queue_tail(&fps->fcoe_rx_list, skb); + if (fps->thread->state == TASK_INTERRUPTIBLE) + wake_up_process(fps->thread); + spin_unlock(&fps->fcoe_rx_list.lock); return 0; err: @@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg) { struct fcoe_percpu_s *p = arg; struct sk_buff *skb; + struct sk_buff_head tmp; + + skb_queue_head_init(&tmp); set_user_nice(current, -20); while (!kthread_should_stop()) { spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { + skb_queue_splice_init(&p->fcoe_rx_list, &tmp); + spin_unlock_bh(&p->fcoe_rx_list.lock); + + while ((skb = __skb_dequeue(&tmp)) != NULL) + fcoe_recv_frame(skb); + + spin_lock_bh(&p->fcoe_rx_list.lock); + if (!skb_queue_len(&p->fcoe_rx_list)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->fcoe_rx_list.lock); schedule(); set_current_state(TASK_RUNNING); - if (kthread_should_stop()) - return 0; - spin_lock_bh(&p->fcoe_rx_list.lock); - } - spin_unlock_bh(&p->fcoe_rx_list.lock); - fcoe_recv_frame(skb); + } else + spin_unlock_bh(&p->fcoe_rx_list.lock); } return 0; } @@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) /* start FIP Discovery and FLOGI */ lport->boot_time = jiffies; fc_fabric_login(lport); - if (!fcoe_link_ok(lport)) + if (!fcoe_link_ok(lport)) { + rtnl_unlock(); fcoe_ctlr_link_up(&fcoe->ctlr); + mutex_unlock(&fcoe_config_mutex); + return rc; + } out_nodev: rtnl_unlock(); @@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport) static void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; - struct fcoe_rcv_info *fr; - struct sk_buff_head *list; - struct sk_buff *skb, *next; - struct sk_buff *head; + struct sk_buff *skb; unsigned int cpu; for_each_possible_cpu(cpu) { pp = &per_cpu(fcoe_percpu, cpu); - spin_lock_bh(&pp->fcoe_rx_list.lock); - list = &pp->fcoe_rx_list; - head = list->next; - for (skb = head; skb != (struct sk_buff *)list; - skb = next) { - next = skb->next; - fr = fcoe_dev_from_skb(skb); - if (fr->fr_dev == lport) { - __skb_unlink(skb, list); - kfree_skb(skb); - } - } - if (!pp->thread || !cpu_online(cpu)) { - spin_unlock_bh(&pp->fcoe_rx_list.lock); + if (!pp->thread || !cpu_online(cpu)) continue; - } skb = dev_alloc_skb(0); if (!skb) { @@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport) } skb->destructor = fcoe_percpu_flush_done; + spin_lock_bh(&pp->fcoe_rx_list.lock); __skb_queue_tail(&pp->fcoe_rx_list, skb); if (pp->fcoe_rx_list.qlen == 1) wake_up_process(pp->thread); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index e7522dcc296e..249a106888d9 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) printk(KERN_INFO "libfcoe: host%d: FIP selected " "Fibre-Channel Forwarder MAC %pM\n", fip->lp->host->host_no, sel->fcf_mac); - memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); + memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN); fip->map_dest = 0; } unlock: @@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, memcpy(fcf->fcf_mac, ((struct fip_mac_desc 
*)desc)->fd_mac, ETH_ALEN); + memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN); if (!is_valid_ether_addr(fcf->fcf_mac)) { LIBFCOE_FIP_DBG(fip, "Invalid MAC addr %pM in FIP adv\n", @@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fip_desc *desc; struct fip_encaps *els; struct fcoe_dev_stats *stats; + struct fcoe_fcf *sel; enum fip_desc_type els_dtype = 0; u8 els_op; u8 sub; @@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; /* Drop ELS if there are duplicate critical descriptors */ if (desc->fip_dtype < 32) { - if (desc_mask & 1U << desc->fip_dtype) { + if ((desc->fip_dtype != FIP_DT_MAC) && + (desc_mask & 1U << desc->fip_dtype)) { LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP ELS\n"); goto drop; @@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) } switch (desc->fip_dtype) { case FIP_DT_MAC: + sel = fip->sel_fcf; if (desc_cnt == 1) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop; } + /* + * Some switch implementations send two MAC descriptors, + * with first MAC(granted_mac) being the FPMA, and the + * second one(fcoe_mac) is used as destination address + * for sending/receiving FCoE packets. FIP traffic is + * sent using fip_mac. For regular switches, both + * fip_mac and fcoe_mac would be the same. + */ + if (desc_cnt == 2) + memcpy(granted_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); if (dlen != sizeof(struct fip_mac_desc)) goto len_err; - memcpy(granted_mac, - ((struct fip_mac_desc *)desc)->fd_mac, - ETH_ALEN); + + if ((desc_cnt == 3) && (sel)) + memcpy(sel->fcoe_mac, + ((struct fip_mac_desc *)desc)->fd_mac, + ETH_ALEN); break; case FIP_DT_FLOGI: case FIP_DT_FDISC: @@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * No Vx_Port description. 
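Aside, not part of the patch: the reordering in this clear-virtual-link hunk is easiest to read as a sequence; FIP controller state is torn down first, and only then are the NPIV ports and the physical port reset and discovery restarted. A hedged sketch of that sequence, assuming it lives inside fcoe_ctlr.c where fcoe_ctlr_reset() and fcoe_ctlr_solicit() are visible:

#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/* Sketch: handle a clear-virtual-link that names no Vx_Port descriptors. */
static void example_handle_full_cvl(struct fcoe_ctlr *fip,
				    struct fc_lport *lport)
{
	struct fc_lport *vn_port;

	mutex_lock(&fip->ctlr_mutex);
	fcoe_ctlr_reset(fip);			/* drop the selected FCF first */
	mutex_unlock(&fip->ctlr_mutex);

	mutex_lock(&lport->lp_mutex);
	list_for_each_entry(vn_port, &lport->vports, list)
		fc_lport_reset(vn_port);	/* then the NPIV ports */
	mutex_unlock(&lport->lp_mutex);

	fc_lport_reset(lport);			/* then the physical port */
	fcoe_ctlr_solicit(fip, NULL);		/* and restart FIP discovery */
}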
Clear all NPIV ports, * followed by physical port */ - mutex_lock(&lport->lp_mutex); - list_for_each_entry(vn_port, &lport->vports, list) - fc_lport_reset(vn_port); - mutex_unlock(&lport->lp_mutex); - mutex_lock(&fip->ctlr_mutex); per_cpu_ptr(lport->dev_stats, get_cpu())->VLinkFailureCount++; @@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, fcoe_ctlr_reset(fip); mutex_unlock(&fip->ctlr_mutex); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vn_port, &lport->vports, list) + fc_lport_reset(vn_port); + mutex_unlock(&lport->lp_mutex); + fc_lport_reset(fip->lp); fcoe_ctlr_solicit(fip, NULL); } else { diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c index a2c6135d337e..53bfcaa86f09 100644 --- a/drivers/scsi/fd_mcs.c +++ b/drivers/scsi/fd_mcs.c @@ -93,7 +93,6 @@ #include <linux/mca-legacy.h> #include <asm/io.h> -#include <asm/system.h> #include "scsi.h" #include <scsi/scsi_host.h> diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index 643f6d500fe7..1a2a1e5824e3 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -282,7 +282,6 @@ #include <linux/slab.h> #include <scsi/scsicam.h> -#include <asm/system.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 81182badfeb1..1a5954f0915a 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -100,7 +100,6 @@ #undef NCR5380_STAT_LIMIT #endif -#include <asm/system.h> #include <asm/io.h> #include <linux/signal.h> #include <linux/blkdev.h> diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index d42ec921de46..5d72274c507f 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -129,7 +129,6 @@ #include <linux/reboot.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/spinlock.h> diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 67fc8ffd52e6..cd09132d5d7d 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c @@ -32,7 +32,6 @@ #include <linux/spinlock.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/io.h> #include "scsi.h" diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index bdfa223a7dbb..134a0ae85bb7 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4890,11 +4890,8 @@ static struct vio_driver ibmvfc_driver = { .probe = ibmvfc_probe, .remove = ibmvfc_remove, .get_desired_dma = ibmvfc_get_desired_dma, - .driver = { - .name = IBMVFC_NAME, - .owner = THIS_MODULE, - .pm = &ibmvfc_pm_ops, - } + .name = IBMVFC_NAME, + .pm = &ibmvfc_pm_ops, }; static struct fc_function_template ibmvfc_transport_functions = { diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index e984951baeb6..3a6c4742951e 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -2061,11 +2061,8 @@ static struct vio_driver ibmvscsi_driver = { .probe = ibmvscsi_probe, .remove = ibmvscsi_remove, .get_desired_dma = ibmvscsi_get_desired_dma, - .driver = { - .name = "ibmvscsi", - .owner = THIS_MODULE, - .pm = &ibmvscsi_pm_ops, - } + .name = "ibmvscsi", + .pm = &ibmvscsi_pm_ops, }; static struct srp_function_template ibmvscsi_transport_functions = { diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 2256babe0474..aa7ed81e9237 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c @@ -918,10 +918,7 @@ static struct vio_driver ibmvstgt_driver = { 
.id_table = ibmvstgt_device_table, .probe = ibmvstgt_probe, .remove = ibmvstgt_remove, - .driver = { - .name = "ibmvscsis", - .owner = THIS_MODULE, - } + .name = "ibmvscsis", }; static int get_system_info(void) diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index 112f1bec7756..deb5b6d8398e 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c @@ -123,7 +123,6 @@ #include <linux/stat.h> #include <asm/io.h> -#include <asm/system.h> #include "scsi.h" #include <scsi/scsi_host.h> diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index cdfe5a16de2a..e002cd466e9a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock); static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ .mailbox = 0x0042C, + .max_cmds = 100, .cache_line_size = 0x20, + .clear_isr = 1, { .set_interrupt_mask_reg = 0x0022C, .clr_interrupt_mask_reg = 0x00230, @@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { }, { /* Snipe and Scamp */ .mailbox = 0x0052C, + .max_cmds = 100, .cache_line_size = 0x20, + .clear_isr = 1, { .set_interrupt_mask_reg = 0x00288, .clr_interrupt_mask_reg = 0x0028C, @@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { }, { /* CRoC */ .mailbox = 0x00044, + .max_cmds = 1000, .cache_line_size = 0x20, + .clear_isr = 0, { .set_interrupt_mask_reg = 0x00010, .clr_interrupt_mask_reg = 0x00018, @@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); - mb(); - ipr_send_command(ipr_cmd); } @@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); - mb(); - ipr_send_command(ipr_cmd); } else { list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); @@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if ((res->bus == starget->channel) && - (res->target == starget->id) && - (res->lun == 0)) { + (res->target == starget->id)) { return res; } } @@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget) struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; if (ioa_cfg->sis64) { - if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) - clear_bit(starget->id, ioa_cfg->array_ids); - else if (starget->channel == IPR_VSET_VIRTUAL_BUS) - clear_bit(starget->id, ioa_cfg->vset_ids); - else if (starget->channel == 0) - clear_bit(starget->id, ioa_cfg->target_ids); + if (!ipr_find_starget(starget)) { + if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->array_ids); + else if (starget->channel == IPR_VSET_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->vset_ids); + else if (starget->channel == 0) + clear_bit(starget->id, ioa_cfg->target_ids); + } } if (sata_port) { @@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, del_timer(&ioa_cfg->reset_cmd->timer); ipr_reset_ioa_job(ioa_cfg->reset_cmd); } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { - if (ipr_debug && printk_ratelimit()) - dev_err(&ioa_cfg->pdev->dev, - "Spurious interrupt detected. 
0x%08X\n", int_reg); - writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); - return IRQ_NONE; + if (ioa_cfg->clear_isr) { + if (ipr_debug && printk_ratelimit()) + dev_err(&ioa_cfg->pdev->dev, + "Spurious interrupt detected. 0x%08X\n", int_reg); + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + return IRQ_NONE; + } } else { if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) ioa_cfg->ioa_unit_checked = 1; @@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp) } } + if (ipr_cmd && !ioa_cfg->clear_isr) + break; + if (ipr_cmd != NULL) { /* Clear the PCI interrupt */ num_hrrq = 0; @@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); } - if (likely(rc == 0)) { - mb(); - ipr_send_command(ipr_cmd); - } else { - list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); - return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(rc != 0)) { + list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return SCSI_MLQUEUE_HOST_BUSY; } + ipr_send_command(ipr_cmd); return 0; } @@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) return AC_ERR_INVALID; } - mb(); - ipr_send_command(ipr_cmd); return 0; @@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) if (ioa_cfg->ipr_cmd_pool) pci_pool_destroy (ioa_cfg->ipr_cmd_pool); + kfree(ioa_cfg->ipr_cmnd_list); + kfree(ioa_cfg->ipr_cmnd_list_dma); + ioa_cfg->ipr_cmnd_list = NULL; + ioa_cfg->ipr_cmnd_list_dma = NULL; ioa_cfg->ipr_cmd_pool = NULL; } @@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) int i; ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, - sizeof(struct ipr_cmnd), 16, 0); + sizeof(struct ipr_cmnd), 512, 0); if (!ioa_cfg->ipr_cmd_pool) return -ENOMEM; + ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); + ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); + + if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { + ipr_free_cmd_blks(ioa_cfg); + return -ENOMEM; + } + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); @@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, host->max_channel = IPR_MAX_BUS_TO_SCAN; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; + host->can_queue = ioa_cfg->max_cmds; pci_set_drvdata(pdev, ioa_cfg); p = &ioa_cfg->chip_cfg->regs; @@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, /* set SIS 32 or SIS 64 */ ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 
1 : 0; ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; + ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; + ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; if (ipr_transop_timeout) ioa_cfg->transop_timeout = ipr_transop_timeout; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index f94eaee2ff16..153b8bd91d1e 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -38,8 +38,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.5.2" -#define IPR_DRIVER_DATE "(April 27, 2011)" +#define IPR_DRIVER_VERSION "2.5.3" +#define IPR_DRIVER_DATE "(March 10, 2012)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding @@ -53,7 +53,7 @@ * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of * ops the mid-layer can send to the adapter. */ -#define IPR_NUM_BASE_CMD_BLKS 100 +#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds) #define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 @@ -153,7 +153,7 @@ #define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) -#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS +#define IPR_MAX_COMMANDS 100 #define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ IPR_NUM_INTERNAL_CMD_BLKS) @@ -1305,7 +1305,9 @@ struct ipr_interrupts { struct ipr_chip_cfg_t { u32 mailbox; + u16 max_cmds; u8 cache_line_size; + u8 clear_isr; struct ipr_interrupt_offsets regs; }; @@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg { u8 sis64:1; u8 dump_timeout:1; u8 cfg_locked:1; + u8 clear_isr:1; u8 revid; @@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg { struct ata_host ata_host; char ipr_cmd_label[8]; #define IPR_CMD_LABEL "ipr_cmd" - struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; - dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; + u32 max_cmds; + struct ipr_cmnd **ipr_cmnd_list; + dma_addr_t *ipr_cmnd_list_dma; }; /* struct ipr_ioa_cfg */ struct ipr_cmnd { diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 630291f01826..aceffadb21c7 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2263,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, mp->class = class; /* adjust em exch xid range for offload */ mp->min_xid = min_xid; - mp->max_xid = max_xid; + + /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */ + pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) / + sizeof(struct fc_exch *); + if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) { + mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) + + min_xid - 1; + } else { + mp->max_xid = max_xid; + pool_exch_range = (mp->max_xid - mp->min_xid + 1) / + (fc_cpu_mask + 1); + } mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); if (!mp->ep_pool) @@ -2274,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, * divided across all cpus. The exch pointers array memory is * allocated for exch range per pool. 
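Aside, not part of the patch: the clamp introduced in the fc_exch_mgr_alloc() hunk above only keeps the per-CPU array of exchange pointers inside one PCPU_MIN_UNIT_SIZE chunk. The arithmetic can be checked in isolation; the numbers below (PCPU_MIN_UNIT_SIZE, pool header size, CPU count, XID range) are stand-ins, not the kernel's actual values:

#include <stdio.h>

int main(void)
{
	/* stand-in values; the real ones come from the kernel headers */
	unsigned long pcpu_min_unit_size = 32 * 1024;	/* PCPU_MIN_UNIT_SIZE */
	unsigned long pool_header = 128;		/* sizeof(struct fc_exch_pool) */
	unsigned int fc_cpu_mask = 7;			/* pools for 8 CPUs */
	unsigned int min_xid = 0x0100, max_xid = 0xFFEF;

	unsigned long pool_exch_range =
		(pcpu_min_unit_size - pool_header) / sizeof(void *);

	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range)
		max_xid = pool_exch_range * (fc_cpu_mask + 1) + min_xid - 1;
	else
		pool_exch_range = (max_xid - min_xid + 1) / (fc_cpu_mask + 1);

	printf("exchanges per pool: %lu, usable max_xid: 0x%x\n",
	       pool_exch_range, max_xid);
	return 0;
}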
*/ - pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1); mp->pool_max_index = pool_exch_range - 1; /* diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd5d31d022d9..ef9560dff295 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1743,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; if (mfs >= FC_SP_MIN_MAX_PAYLOAD && - mfs < lport->mfs) + mfs <= lport->mfs) { lport->mfs = mfs; + fc_host_maxframe_size(lport->host) = mfs; + } else { + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " + "lport->mfs:%hu\n", mfs, lport->mfs); + fc_lport_error(lport, fp); + goto err; + } + csp_flags = ntohs(flp->fl_csp.sp_features); r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 88928f00aa2d..fe5d396aca73 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,7 +1,7 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2004-2011 Emulex. All rights reserved. * +# * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * # * www.emulex.com * # * * @@ -22,6 +22,8 @@ ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage ccflags-$(GCOV) += -O0 +ccflags-y += -Werror + obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 5fc044ff656e..3a1ffdd6d831 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -840,6 +840,8 @@ struct lpfc_hba { struct dentry *debug_dumpData; /* BlockGuard BPL */ struct dentry *debug_dumpDif; /* BlockGuard BPL */ struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ + struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ + struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ struct dentry *debug_writeGuard; /* inject write guard_tag errors */ struct dentry *debug_writeApp; /* inject write app_tag errors */ struct dentry *debug_writeRef; /* inject write ref_tag errors */ @@ -854,6 +856,8 @@ struct lpfc_hba { uint32_t lpfc_injerr_rgrd_cnt; uint32_t lpfc_injerr_rapp_cnt; uint32_t lpfc_injerr_rref_cnt; + uint32_t lpfc_injerr_nportid; + struct lpfc_name lpfc_injerr_wwpn; sector_t lpfc_injerr_lba; #define LPFC_INJERR_LBA_OFF (sector_t)(-1) @@ -908,6 +912,8 @@ struct lpfc_hba { atomic_t fast_event_count; uint32_t fcoe_eventtag; uint32_t fcoe_eventtag_at_fcf_scan; + uint32_t fcoe_cvl_eventtag; + uint32_t fcoe_cvl_eventtag_attn; struct lpfc_fcf fcf; uint8_t fc_map[3]; uint8_t valid_vlan; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 296ad5bc4240..5eb2bc116183 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2575,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters # objects that have been registered with the nameserver after login. */ -LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, +LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, "Deregister nameserver objects before LOGO"); /* diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 22e17be04d8a..af04b0d6688d 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2011 Emulex. All rights reserved. * + * Copyright (C) 2007-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -997,38 +997,41 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } -static int -lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static ssize_t lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct dentry *dent = file->f_dentry; struct lpfc_hba *phba = file->private_data; - char cbuf[16]; + char cbuf[32]; + uint64_t tmp = 0; int cnt = 0; if (dent == phba->debug_writeGuard) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); else if (dent == phba->debug_writeApp) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); else if (dent == phba->debug_writeRef) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); else if (dent == phba->debug_readGuard) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); else if (dent == phba->debug_readApp) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); else if (dent == phba->debug_readRef) - cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt); - else if (dent == phba->debug_InjErrLBA) - cnt = snprintf(cbuf, 16, "0x%lx\n", - (unsigned long) phba->lpfc_injerr_lba); - else + cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); + else if (dent == phba->debug_InjErrNPortID) + cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid); + else if (dent == phba->debug_InjErrWWPN) { + memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); + tmp = cpu_to_be64(tmp); + cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp); + } else if (dent == phba->debug_InjErrLBA) { + if (phba->lpfc_injerr_lba == (sector_t)(-1)) + cnt = snprintf(cbuf, 32, "off\n"); + else + cnt = snprintf(cbuf, 32, "0x%llx\n", + (uint64_t) phba->lpfc_injerr_lba); + } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0547 Unknown debugfs error injection entry\n"); @@ -1042,7 +1045,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, struct dentry *dent = file->f_dentry; struct lpfc_hba *phba = file->private_data; char dstbuf[32]; - unsigned long tmp; + uint64_t tmp = 0; int size; memset(dstbuf, 0, 32); @@ -1050,7 +1053,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, if (copy_from_user(dstbuf, buf, size)) return 0; - if (strict_strtoul(dstbuf, 0, &tmp)) + if (dent == phba->debug_InjErrLBA) { + if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) + tmp = (uint64_t)(-1); + } + + if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) return 0; if (dent == phba->debug_writeGuard) @@ -1067,7 +1075,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; else if (dent == phba->debug_InjErrLBA) phba->lpfc_injerr_lba = (sector_t)tmp; - else + else if (dent == phba->debug_InjErrNPortID) + phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID); + else if (dent == phba->debug_InjErrWWPN) { + tmp = cpu_to_be64(tmp); + memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name)); + } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0548 Unknown debugfs error injection entry\n"); @@ -3521,7 +3534,7 @@ static const struct 
file_operations lpfc_debugfs_op_dumpDif = { #undef lpfc_debugfs_op_dif_err static const struct file_operations lpfc_debugfs_op_dif_err = { .owner = THIS_MODULE, - .open = lpfc_debugfs_dif_err_open, + .open = simple_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_dif_err_read, .write = lpfc_debugfs_dif_err_write, @@ -3949,6 +3962,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + snprintf(name, sizeof(name), "InjErrNPortID"); + phba->debug_InjErrNPortID = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrNPortID) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0809 Cannot create debugfs InjErrNPortID\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "InjErrWWPN"); + phba->debug_InjErrWWPN = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrWWPN) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0810 Cannot create debugfs InjErrWWPN\n"); + goto debug_failed; + } + snprintf(name, sizeof(name), "writeGuardInjErr"); phba->debug_writeGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -4321,6 +4356,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ phba->debug_InjErrLBA = NULL; } + if (phba->debug_InjErrNPortID) { /* InjErrNPortID */ + debugfs_remove(phba->debug_InjErrNPortID); + phba->debug_InjErrNPortID = NULL; + } + if (phba->debug_InjErrWWPN) { + debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ + phba->debug_InjErrWWPN = NULL; + } if (phba->debug_writeGuard) { debugfs_remove(phba->debug_writeGuard); /* writeGuard */ phba->debug_writeGuard = NULL; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8db2fb3b45ec..3407b39e0a3f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
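Aside, not part of the patch: the debugfs changes above (switching to simple_open(), parsing with kstrtoull() with an "off" special case, and the new InjErrNPortID/InjErrWWPN entries) follow a generic debugfs attribute pattern. A minimal, self-contained sketch of that pattern with made-up example_* names; it is the shape of the code, not the driver's code:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static u64 example_inj_lba = (u64)-1;		/* (u64)-1 means "off" */

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t nbytes, loff_t *ppos)
{
	char cbuf[32];
	int cnt;

	if (example_inj_lba == (u64)-1)
		cnt = snprintf(cbuf, sizeof(cbuf), "off\n");
	else
		cnt = snprintf(cbuf, sizeof(cbuf), "0x%llx\n", example_inj_lba);
	return simple_read_from_buffer(buf, nbytes, ppos, cbuf, cnt);
}

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t nbytes, loff_t *ppos)
{
	char dstbuf[32] = { 0 };
	u64 tmp;

	if (copy_from_user(dstbuf, buf, min_t(size_t, nbytes, sizeof(dstbuf) - 1)))
		return -EFAULT;
	if (!strncmp(dstbuf, "off", 3))
		tmp = (u64)-1;			/* special-case "off", as the LBA entry does */
	else if (kstrtoull(dstbuf, 0, &tmp))
		return -EINVAL;
	example_inj_lba = tmp;
	return nbytes;
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,			/* sets file->private_data = inode->i_private */
	.read  = example_read,
	.write = example_write,
};

/* registration: debugfs_create_file("InjErrLBA", S_IRUGO | S_IWUSR,
 *                                   parent_dentry, NULL, &example_fops); */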
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * due to new FCF discovery */ if ((phba->hba_flag & HBA_FIP_SUPPORT) && - (phba->fcf.fcf_flag & FCF_DISCOVERY) && - !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + if (phba->link_state < LPFC_LINK_UP) + goto stop_rr_fcf_flogi; + if ((phba->fcoe_cvl_eventtag_attn == + phba->fcoe_cvl_eventtag) && + (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)) + goto stop_rr_fcf_flogi; + else + phba->fcoe_cvl_eventtag_attn = + phba->fcoe_cvl_eventtag; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2611 FLOGI failed on FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " @@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } +stop_rr_fcf_flogi: /* FLOGI failure */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 343d87ba4df8..b507536dc5b5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct lpfc_vport *vport = mboxq->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (mboxq->u.mb.mbxStatus) { + /* + * VFI not supported for interface type 0, so ignore any mailbox + * error (except VFI in use) and continue with the discovery. + */ + if (mboxq->u.mb.mbxStatus && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_0) && + mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "2018 REG_VFI mbxStatus error x%x " "HBA state x%x\n", @@ -5673,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) ret = 1; spin_unlock_irq(shost->host_lock); goto out; - } else { + } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2624 RPI %x DID %x flg %x still " - "logged in\n", - ndlp->nlp_rpi, ndlp->nlp_DID, - ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - ret = 1; + "2624 RPI %x DID %x flag %x " + "still logged in\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag); } } spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 9e2b9b227e1a..91f09761bd32 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009 Emulex. All rights reserved. * + * Copyright (C) 2009-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -338,6 +338,12 @@ struct lpfc_cqe { #define CQE_CODE_XRI_ABORTED 0x5 #define CQE_CODE_RECEIVE_V1 0x9 +/* + * Define mask value for xri_aborted and wcqe completed CQE extended status. + * Currently, extended status is limited to 9 bits (0x0 -> 0x103) . + */ +#define WCQE_PARAM_MASK 0x1FF; + /* completion queue entry for wqe completions */ struct lpfc_wcqe_complete { uint32_t word0; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b38f99f3be32..9598fdcb08ab 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2704,16 +2704,14 @@ lpfc_offline_prep(struct lpfc_hba * phba) } spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - + spin_unlock_irq(shost->host_lock); /* * Whenever an SLI4 port goes offline, free the - * RPI. A new RPI when the adapter port comes - * back online. + * RPI. Get a new RPI when the adapter port + * comes back online. */ if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - - spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); } } @@ -2786,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { sb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; + set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask); + phba->sli4_hba.max_cfg_param.xri_used++; + phba->sli4_hba.xri_count++; + } spin_unlock(&phba->scsi_buf_list_lock); spin_unlock_irq(&phba->hbalock); return 0; @@ -3723,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, break; case LPFC_FIP_EVENT_TYPE_FCF_DEAD: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2549 FCF (x%x) disconnected from network, " "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); @@ -3784,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, } break; case LPFC_FIP_EVENT_TYPE_CVL: + phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); @@ -5226,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) * rpi is normalized to a zero base because the physical rpi is * port based. */ - curr_rpi_range = phba->sli4_hba.next_rpi - - phba->sli4_hba.max_cfg_param.rpi_base; + curr_rpi_range = phba->sli4_hba.next_rpi; spin_unlock_irq(&phba->hbalock); /* @@ -5818,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2. 
ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2888 Port Error Detected " - "during POST: " - "port status reg 0x%x, " - "port_smphr reg 0x%x, " + "2888 Unrecoverable port error " + "following POST: port status reg " + "0x%x, port_smphr reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", reg_data.word0, portsmphr_reg.word0, @@ -6142,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; - phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; @@ -7231,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) uint32_t rdy_chk, num_resets = 0, reset_again = 0; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_register reg_data; + uint16_t devid; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { @@ -7277,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) LPFC_SLIPORT_INIT_PORT); writel(reg_data.word0, phba->sli4_hba.u.if_type2. CTRLregaddr); - + /* flush */ + pci_read_config_word(phba->pcidev, + PCI_DEVICE_ID, &devid); /* * Poll the Port Status Register and wait for RDY for * up to 10 seconds. If the port doesn't respond, treat @@ -7315,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) phba->work_status[1] = readl( phba->sli4_hba.u.if_type2.ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2890 Port Error Detected " - "during Port Reset: " - "port status reg 0x%x, " + "2890 Port error detected during port " + "reset(%d): port status reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", - reg_data.word0, + num_resets, reg_data.word0, phba->work_status[0], phba->work_status[1]); rc = -ENODEV; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 7b6b2aa5795a..15ca2a9a0cdd 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -440,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_unlock_irq(shost->host_lock); stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, mbox); + if (rc) + mempool_free(mbox, phba->mbox_mem_pool); return 1; } - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + if (rc) + mempool_free(mbox, phba->mbox_mem_pool); return 1; out: stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index efc055b6bac4..88f3a83dbd2e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -39,8 +39,8 @@ #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" -#include "lpfc_scsi.h" #include "lpfc.h" +#include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" @@ -51,13 +51,19 @@ int _dump_buf_done; static char *dif_op_str[] = { - "SCSI_PROT_NORMAL", - "SCSI_PROT_READ_INSERT", - "SCSI_PROT_WRITE_STRIP", - "SCSI_PROT_READ_STRIP", - "SCSI_PROT_WRITE_INSERT", - "SCSI_PROT_READ_PASS", - "SCSI_PROT_WRITE_PASS", + "PROT_NORMAL", + "PROT_READ_INSERT", + "PROT_WRITE_STRIP", + "PROT_READ_STRIP", + "PROT_WRITE_INSERT", + "PROT_READ_PASS", + "PROT_WRITE_PASS", +}; + +static char *dif_grd_str[] = { + "NO_GUARD", + "DIF_CRC", + "DIX_IP", }; struct scsi_dif_tuple { @@ -1281,10 +1287,14 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS -#define BG_ERR_INIT 1 -#define BG_ERR_TGT 2 -#define BG_ERR_SWAP 3 -#define BG_ERR_CHECK 4 +/* Return if if error injection is detected by Initiator */ +#define BG_ERR_INIT 0x1 +/* Return if if error injection is detected by Target */ +#define BG_ERR_TGT 0x2 +/* Return if if swapping CSUM<-->CRC is required for error injection */ +#define BG_ERR_SWAP 0x10 +/* Return if disabling Guard/Ref/App checking is required for error injection */ +#define BG_ERR_CHECK 0x20 /** * lpfc_bg_err_inject - Determine if we should inject an error @@ -1294,10 +1304,7 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) * @apptag: (out) BlockGuard application tag for transmitted data * @new_guard (in) Value to replace CRC with if needed * - * Returns (1) if error injection is detected by Initiator - * Returns (2) if error injection is detected by Target - * Returns (3) if swapping CSUM->CRC is required for error injection - * Returns (4) disabling Guard/Ref/App checking is required for error injection + * Returns BG_ERR_* bit mask or 0 if request ignored **/ static int lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, @@ -1305,7 +1312,10 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, { struct scatterlist *sgpe; /* s/g prot entry */ struct scatterlist *sgde; /* s/g data entry */ + struct lpfc_scsi_buf *lpfc_cmd = NULL; struct scsi_dif_tuple *src = NULL; + struct lpfc_nodelist *ndlp; + struct lpfc_rport_data *rdata; uint32_t op = scsi_get_prot_op(sc); uint32_t blksize; uint32_t numblks; @@ -1318,8 +1328,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgpe = scsi_prot_sglist(sc); sgde = scsi_sglist(sc); - lba = scsi_get_lba(sc); + + /* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { blksize = lpfc_cmd_blksize(sc); 
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; @@ -1334,66 +1345,123 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, sizeof(struct scsi_dif_tuple); if (numblks < blockoff) blockoff = numblks; - src = (struct scsi_dif_tuple *)sg_virt(sgpe); - src += blockoff; } } + /* Next check if we need to match the remote NPortID or WWPN */ + rdata = sc->device->hostdata; + if (rdata && rdata->pnode) { + ndlp = rdata->pnode; + + /* Make sure we have the right NPortID if one is specified */ + if (phba->lpfc_injerr_nportid && + (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) + return 0; + + /* + * Make sure we have the right WWPN if one is specified. + * wwn[0] should be a non-zero NAA in a good WWPN. + */ + if (phba->lpfc_injerr_wwpn.u.wwn[0] && + (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, + sizeof(struct lpfc_name)) != 0)) + return 0; + } + + /* Setup a ptr to the protection data if the SCSI host provides it */ + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + src += blockoff; + lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble; + } + /* Should we change the Reference Tag */ if (reftag) { if (phba->lpfc_injerr_wref_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: - if (blockoff && src) { - /* Insert error in middle of the IO */ + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9076 BLKGRD: Injecting reftag error: " "write lba x%lx + x%x oldrefTag x%x\n", (unsigned long)lba, blockoff, - src->ref_tag); + be32_to_cpu(src->ref_tag)); /* - * NOTE, this will change ref tag in - * the memory location forever! + * Save the old ref_tag so we can + * restore it on completion. */ - src->ref_tag = 0xDEADBEEF; + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_REFTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->ref_tag; + } + src->ref_tag = cpu_to_be32(0xDEADBEEF); phba->lpfc_injerr_wref_cnt--; - phba->lpfc_injerr_lba = - LPFC_INJERR_LBA_OFF; - rc = BG_ERR_CHECK; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; + break; } /* Drop thru */ - case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_WRITE_INSERT: /* - * For WRITE_STRIP and WRITE_PASS, - * force the error on data - * being copied from SLI-Host to SLI-Port. + * For WRITE_INSERT, force the error + * to be sent on the wire. It should be + * detected by the Target. */ + /* DEADBEEF will be the reftag on the wire */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_wref_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = BG_ERR_INIT; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9077 BLKGRD: Injecting reftag error: " + "9078 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; - case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_WRITE_STRIP: /* - * For WRITE_INSERT, force the - * error to be sent on the wire. It should be - * detected by the Target. + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. 
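Aside, not part of the patch: the new targeting logic earlier in lpfc_bg_err_inject() scopes an injection to an LBA window, a remote NPortID and/or a remote WWPN, and the surrounding hunks clear those filters once the remaining injection count reaches zero. A condensed sketch of just the remote-port match, assuming the lpfc private headers for struct lpfc_hba, struct lpfc_nodelist and struct lpfc_name; example_should_inject() is a made-up helper name:

#include <linux/string.h>
#include <linux/types.h>

/* Sketch: return false when an NPortID or WWPN filter is armed and the
 * remote port this command is bound for does not match it. */
static bool example_should_inject(struct lpfc_hba *phba,
				  struct lpfc_nodelist *ndlp)
{
	if (phba->lpfc_injerr_nportid &&
	    phba->lpfc_injerr_nportid != ndlp->nlp_DID)
		return false;			/* wrong remote N_Port ID */

	if (phba->lpfc_injerr_wwpn.u.wwn[0] &&	/* non-zero NAA => WWPN armed */
	    memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
		   sizeof(struct lpfc_name)) != 0)
		return false;			/* wrong remote WWPN */

	return true;
}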
*/ - /* DEADBEEF will be the reftag on the wire */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_wref_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = BG_ERR_TGT; + if (phba->lpfc_injerr_wref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9078 BLKGRD: Injecting reftag error: " + "9077 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; } @@ -1401,11 +1469,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (phba->lpfc_injerr_rref_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: - /* - * For READ_INSERT, it doesn't make sense - * to change the reftag. - */ - break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* @@ -1415,7 +1478,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_rref_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + if (phba->lpfc_injerr_rref_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, @@ -1431,56 +1500,87 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (phba->lpfc_injerr_wapp_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: - if (blockoff && src) { - /* Insert error in middle of the IO */ + if (src) { + /* + * For WRITE_PASS, force the error + * to be sent on the wire. It should + * be detected by the Target. + * If blockoff != 0 error will be + * inserted in middle of the IO. + */ lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9080 BLKGRD: Injecting apptag error: " "write lba x%lx + x%x oldappTag x%x\n", (unsigned long)lba, blockoff, - src->app_tag); + be16_to_cpu(src->app_tag)); /* - * NOTE, this will change app tag in - * the memory location forever! + * Save the old app_tag so we can + * restore it on completion. */ - src->app_tag = 0xDEAD; + if (lpfc_cmd) { + lpfc_cmd->prot_data_type = + LPFC_INJERR_APPTAG; + lpfc_cmd->prot_data_segment = + src; + lpfc_cmd->prot_data = + src->app_tag; + } + src->app_tag = cpu_to_be16(0xDEAD); phba->lpfc_injerr_wapp_cnt--; - phba->lpfc_injerr_lba = - LPFC_INJERR_LBA_OFF; - rc = BG_ERR_CHECK; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; break; } /* Drop thru */ - case SCSI_PROT_WRITE_STRIP: + case SCSI_PROT_WRITE_INSERT: /* - * For WRITE_STRIP and WRITE_PASS, - * force the error on data - * being copied from SLI-Host to SLI-Port. + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. 
*/ + /* DEAD will be the apptag on the wire */ *apptag = 0xDEAD; phba->lpfc_injerr_wapp_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = BG_ERR_INIT; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "0812 BLKGRD: Injecting apptag error: " + "0813 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; - case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_WRITE_STRIP: /* - * For WRITE_INSERT, force the - * error to be sent on the wire. It should be - * detected by the Target. + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. */ - /* DEAD will be the apptag on the wire */ *apptag = 0xDEAD; phba->lpfc_injerr_wapp_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; - rc = BG_ERR_TGT; + if (phba->lpfc_injerr_wapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } + rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "0813 BLKGRD: Injecting apptag error: " + "0812 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; } @@ -1488,11 +1588,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (phba->lpfc_injerr_rapp_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: - /* - * For READ_INSERT, it doesn't make sense - * to change the apptag. - */ - break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* @@ -1502,7 +1597,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, */ *apptag = 0xDEAD; phba->lpfc_injerr_rapp_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + if (phba->lpfc_injerr_rapp_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_BG, @@ -1519,57 +1620,51 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (phba->lpfc_injerr_wgrd_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: - if (blockoff && src) { - /* Insert error in middle of the IO */ - - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "0815 BLKGRD: Injecting guard error: " - "write lba x%lx + x%x oldgrdTag x%x\n", - (unsigned long)lba, blockoff, - src->guard_tag); - - /* - * NOTE, this will change guard tag in - * the memory location forever! - */ - src->guard_tag = 0xDEAD; - phba->lpfc_injerr_wgrd_cnt--; - phba->lpfc_injerr_lba = - LPFC_INJERR_LBA_OFF; - rc = BG_ERR_CHECK; - break; - } + rc = BG_ERR_CHECK; /* Drop thru */ - case SCSI_PROT_WRITE_STRIP: + + case SCSI_PROT_WRITE_INSERT: /* - * For WRITE_STRIP and WRITE_PASS, - * force the error on data - * being copied from SLI-Host to SLI-Port. + * For WRITE_INSERT, force the + * error to be sent on the wire. It should be + * detected by the Target. 
*/ phba->lpfc_injerr_wgrd_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } - rc = BG_ERR_SWAP; + rc |= BG_ERR_TGT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "0816 BLKGRD: Injecting guard error: " + "0817 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; - case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_WRITE_STRIP: /* - * For WRITE_INSERT, force the - * error to be sent on the wire. It should be - * detected by the Target. + * For WRITE_STRIP and WRITE_PASS, + * force the error on data + * being copied from SLI-Host to SLI-Port. */ phba->lpfc_injerr_wgrd_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + if (phba->lpfc_injerr_wgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } - rc = BG_ERR_SWAP; + rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "0817 BLKGRD: Injecting guard error: " + "0816 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; } @@ -1577,11 +1672,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (phba->lpfc_injerr_rgrd_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: - /* - * For READ_INSERT, it doesn't make sense - * to change the guard tag. - */ - break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* @@ -1589,11 +1679,16 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, * error on data being read off the wire. It * should force an IO error to the driver. 
*/ - *apptag = 0xDEAD; phba->lpfc_injerr_rgrd_cnt--; - phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + if (phba->lpfc_injerr_rgrd_cnt == 0) { + phba->lpfc_injerr_nportid = 0; + phba->lpfc_injerr_lba = + LPFC_INJERR_LBA_OFF; + memset(&phba->lpfc_injerr_wwpn, + 0, sizeof(struct lpfc_name)); + } - rc = BG_ERR_SWAP; + rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_BG, @@ -1629,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CSUM_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CSUM_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CRC; break; case SCSI_PROT_NORMAL: @@ -1658,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CRC_OUT_CRC; *rxop = BG_OP_IN_CRC_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CRC; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CRC_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_NORMAL: @@ -1710,20 +1805,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CRC_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CSUM; *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CRC_OUT_CRC; - *rxop = BG_OP_IN_CRC_OUT_CRC; + *rxop = BG_OP_IN_CSUM_OUT_CRC; + *txop = BG_OP_IN_CRC_OUT_CSUM; break; case SCSI_PROT_NORMAL: @@ -1735,20 +1830,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - *txop = BG_OP_IN_NODIF_OUT_CSUM; *rxop = BG_OP_IN_CSUM_OUT_NODIF; + *txop = BG_OP_IN_NODIF_OUT_CSUM; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - *txop = BG_OP_IN_CSUM_OUT_CRC; - *rxop = BG_OP_IN_CRC_OUT_CSUM; + *rxop = BG_OP_IN_CSUM_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_CSUM; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - *txop = BG_OP_IN_CSUM_OUT_NODIF; *rxop = BG_OP_IN_NODIF_OUT_CSUM; + *txop = BG_OP_IN_CSUM_OUT_NODIF; break; case SCSI_PROT_NORMAL: @@ -1817,11 +1912,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { - if (rc == BG_ERR_SWAP) + if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); - if (rc == BG_ERR_CHECK) + if (rc & BG_ERR_CHECK) checking = 0; } #endif @@ -1964,11 +2059,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, 
struct scsi_cmnd *sc, reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { - if (rc == BG_ERR_SWAP) + if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); - if (rc == BG_ERR_CHECK) + if (rc & BG_ERR_CHECK) checking = 0; } #endif @@ -2172,11 +2267,11 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { - if (rc == BG_ERR_SWAP) + if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); - if (rc == BG_ERR_CHECK) + if (rc & BG_ERR_CHECK) checking = 0; } #endif @@ -2312,11 +2407,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); + rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { - if (rc == BG_ERR_SWAP) + if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); - if (rc == BG_ERR_CHECK) + if (rc & BG_ERR_CHECK) checking = 0; } #endif @@ -2788,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, /* No error was reported - problem in FW? */ cmd->result = ScsiResult(DID_ERROR, 0); lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: no errors reported!\n"); + "9057 BLKGRD: Unknown error reported!\n"); } out: @@ -3460,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, /* pick up SLI4 exhange busy status from HBA */ lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->prot_data_type) { + struct scsi_dif_tuple *src = NULL; + + src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; + /* + * Used to restore any changes to protection + * data for error injection. + */ + switch (lpfc_cmd->prot_data_type) { + case LPFC_INJERR_REFTAG: + src->ref_tag = + lpfc_cmd->prot_data; + break; + case LPFC_INJERR_APPTAG: + src->app_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + case LPFC_INJERR_GUARD: + src->guard_tag = + (uint16_t)lpfc_cmd->prot_data; + break; + default: + break; + } + + lpfc_cmd->prot_data = 0; + lpfc_cmd->prot_data_type = 0; + lpfc_cmd->prot_data_segment = NULL; + } +#endif if (pnode && NLP_CHK_NODE_ACT(pnode)) atomic_dec(&pnode->cmd_pending); @@ -4061,15 +4187,6 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) cmnd->result = err; goto out_fail_command; } - /* - * Do not let the mid-layer retry I/O too fast. If an I/O is retried - * without waiting a bit then indicate that the device is busy. 
- */ - if (cmnd->retries && - time_before(jiffies, (cmnd->jiffies_at_alloc + - msecs_to_jiffies(LPFC_RETRY_PAUSE * - cmnd->retries)))) - return SCSI_MLQUEUE_DEVICE_BUSY; ndlp = rdata->pnode; if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && @@ -4119,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " - "str=%s\n", - cmnd->cmnd[0], scsi_get_prot_op(cmnd), - dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x\n", - cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], - cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], - cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], - cmnd->cmnd[9]); + "9033 BLKGRD: rcvd protected cmd:%02x op=%s " + "guard=%s\n", cmnd->cmnd[0], + dif_op_str[scsi_get_prot_op(cmnd)], + dif_grd_str[scsi_host_get_guard(shost)]); if (cmnd->cmnd[0] == READ_10) lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9035 BLKGRD: READ @ sector %llu, " - "count %u\n", + "cnt %u, rpt %d\n", (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request)); + blk_rq_sectors(cmnd->request), + (cmnd->cmnd[1]>>5)); else if (cmnd->cmnd[0] == WRITE_10) lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9036 BLKGRD: WRITE @ sector %llu, " - "count %u cmd=%p\n", + "cnt %u, wpt %d\n", (unsigned long long)scsi_get_lba(cmnd), blk_rq_sectors(cmnd->request), - cmnd); + (cmnd->cmnd[1]>>5)); } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9038 BLKGRD: rcvd unprotected cmd:" - "%02x op:%02x str=%s\n", - cmnd->cmnd[0], scsi_get_prot_op(cmnd), - dif_op_str[scsi_get_prot_op(cmnd)]); - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9039 BLKGRD: CDB: %02x %02x %02x " - "%02x %02x %02x %02x %02x %02x %02x\n", - cmnd->cmnd[0], cmnd->cmnd[1], - cmnd->cmnd[2], cmnd->cmnd[3], - cmnd->cmnd[4], cmnd->cmnd[5], - cmnd->cmnd[6], cmnd->cmnd[7], - cmnd->cmnd[8], cmnd->cmnd[9]); + "9038 BLKGRD: rcvd unprotected cmd:" + "%02x op=%s guard=%s\n", cmnd->cmnd[0], + dif_op_str[scsi_get_prot_op(cmnd)], + dif_grd_str[scsi_host_get_guard(shost)]); if (cmnd->cmnd[0] == READ_10) lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9040 dbg: READ @ sector %llu, " - "count %u\n", + "cnt %u, rpt %d\n", (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request)); + blk_rq_sectors(cmnd->request), + (cmnd->cmnd[1]>>5)); else if (cmnd->cmnd[0] == WRITE_10) lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9041 dbg: WRITE @ sector %llu, " - "count %u cmd=%p\n", - (unsigned long long)scsi_get_lba(cmnd), - blk_rq_sectors(cmnd->request), cmnd); - else - lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, - "9042 dbg: parser not implemented\n"); + "9041 dbg: WRITE @ sector %llu, " + "cnt %u, wpt %d\n", + (unsigned long long)scsi_get_lba(cmnd), + blk_rq_sectors(cmnd->request), + (cmnd->cmnd[1]>>5)); } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 9075a08cf781..21a2ffe67eac 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
* - * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -150,9 +150,18 @@ struct lpfc_scsi_buf { struct lpfc_iocbq cur_iocbq; wait_queue_head_t *waitq; unsigned long start_time; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* Used to restore any changes to protection data for error injection */ + void *prot_data_segment; + uint32_t prot_data; + uint32_t prot_data_type; +#define LPFC_INJERR_REFTAG 1 +#define LPFC_INJERR_APPTAG 2 +#define LPFC_INJERR_GUARD 3 +#endif }; #define LPFC_SCSI_DMA_EXT_SIZE 264 #define LPFC_BPL_SIZE 1024 -#define LPFC_RETRY_PAUSE 300 #define MDAC_DIRECT_CMD 0x22 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e0e4d8d18244..dbaf5b963bff 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -5578,8 +5578,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) for (i = 0; i < count; i++) phba->sli4_hba.rpi_ids[i] = base + i; - lpfc_sli4_node_prep(phba); - /* VPIs. */ count = phba->sli4_hba.max_cfg_param.max_vpi; base = phba->sli4_hba.max_cfg_param.vpi_base; @@ -5613,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) rc = -ENOMEM; goto free_vpi_ids; } + phba->sli4_hba.max_cfg_param.xri_used = 0; + phba->sli4_hba.xri_count = 0; phba->sli4_hba.xri_ids = kzalloc(count * sizeof(uint16_t), GFP_KERNEL); @@ -6147,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_mbox; } + lpfc_sli4_node_prep(phba); /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); @@ -7251,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) out_not_finished: spin_lock_irqsave(&phba->hbalock, iflags); - mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; - __lpfc_mbox_cmpl_put(phba, mboxq); - /* Release the token */ - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = NULL; + if (phba->sli.mbox_active) { + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + /* Release the token */ + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_NOT_FINISHED; @@ -7743,6 +7746,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, if (pcmd && (*pcmd == ELS_CMD_FLOGI || *pcmd == ELS_CMD_SCR || *pcmd == ELS_CMD_FDISC || + *pcmd == ELS_CMD_LOGO || *pcmd == ELS_CMD_PLOGI)) { bf_set(els_req64_sp, &wqe->els_req, 1); bf_set(els_req64_sid, &wqe->els_req, @@ -8385,6 +8389,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri) { struct lpfc_vport *vport; + uint32_t ext_status = 0; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -8396,12 +8401,20 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, vport = ndlp->vport; lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3116 Port generated FCP XRI ABORT event on " - "vpi %d rpi %d xri x%x status 0x%x\n", + "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", ndlp->vport->vpi, ndlp->nlp_rpi, 
bf_get(lpfc_wcqe_xa_xri, axri), - bf_get(lpfc_wcqe_xa_status, axri)); + bf_get(lpfc_wcqe_xa_status, axri), + axri->parameter); - if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) + /* + * Catch the ABTS protocol failure case. Older OCe FW releases returned + * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and + * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. + */ + ext_status = axri->parameter & WCQE_PARAM_MASK; + if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && + ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) lpfc_sli_abts_recover_port(vport, ndlp); } @@ -9807,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) unsigned long timeout; timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); if (psli->sli_flag & LPFC_SLI_ACTIVE) { - spin_lock_irq(&phba->hbalock); /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ @@ -9831,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) */ break; } - } + } else + spin_unlock_irq(&phba->hbalock); + lpfc_sli_mbox_sys_flush(phba); } @@ -13272,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) LPFC_MBOXQ_t *mbox; uint32_t reqlen, alloclen, index; uint32_t mbox_tmo; - uint16_t rsrc_start, rsrc_size, els_xri_cnt; + uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt; uint16_t xritag_start = 0, lxri = 0; struct lpfc_rsrc_blks *rsrc_blk; int cnt, ttl_cnt, rc = 0; @@ -13294,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) cnt = 0; ttl_cnt = 0; + post_els_xri_cnt = els_xri_cnt; list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, list) { rsrc_start = rsrc_blk->rsrc_start; @@ -13303,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) "3014 Working ELS Extent start %d, cnt %d\n", rsrc_start, rsrc_size); - loop_cnt = min(els_xri_cnt, rsrc_size); - if (ttl_cnt + loop_cnt >= els_xri_cnt) { - loop_cnt = els_xri_cnt - ttl_cnt; - ttl_cnt = els_xri_cnt; - } + loop_cnt = min(post_els_xri_cnt, rsrc_size); + if (loop_cnt < post_els_xri_cnt) { + post_els_xri_cnt -= loop_cnt; + ttl_cnt += loop_cnt; + } else + ttl_cnt += post_els_xri_cnt; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -14203,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, * field and RX_ID from ABTS for RX_ID field. */ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); - bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the - * XRI_TAG and RX_ID fields. + * XRI_TAG field. 
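In the XRI-abort handler earlier in this hunk, port recovery is no longer triggered by every LOCAL_REJECT: the WCQE parameter is masked down to an extended status, and only SEQUENCE_TIMEOUT (newer firmware) or 0 (older OCe firmware) is treated as a failed ABTS exchange. A condensed restatement of that test as plain C; the numeric values are placeholders, the real IOSTAT_/IOERR_ codes live in the lpfc headers:

#include <stdbool.h>
#include <stdint.h>

#define IOSTAT_LOCAL_REJECT	0x03	/* placeholder value */
#define IOERR_SEQUENCE_TIMEOUT	0x0b	/* placeholder value */
#define WCQE_PARAM_MASK		0xff	/* extended status assumed in the low byte */

static bool abts_protocol_failure(uint32_t status, uint32_t parameter)
{
	uint32_t ext_status = parameter & WCQE_PARAM_MASK;

	/* Older OCe firmware reports 0, newer firmware SEQUENCE_TIMEOUT. */
	return status == IOSTAT_LOCAL_REJECT &&
	       (ext_status == IOERR_SEQUENCE_TIMEOUT || ext_status == 0);
}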
*/ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); - bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); } + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); /* Xmit CT abts response on exchange <xid> */ @@ -15042,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) LPFC_MBOXQ_t *mboxq; phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; + phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f2a2602e5c35..25cefc254b76 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2012 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.29" +#define LPFC_DRIVER_VERSION "8.3.30" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index e6173376605d..e5cd8d8d4ce7 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -22,7 +22,6 @@ #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/pci-bridge.h> #include <asm/macio.h> diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 2bccfbe5661e..24828b54773a 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -43,7 +43,6 @@ #include <asm/io.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/macintosh.h> #include <asm/macints.h> diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 494474779532..e8a04ae3276a 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -33,7 +33,6 @@ #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/processor.h> diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 5e69f468535f..8a59a772fdf2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) return; /* eat the loginfos associated with task aborts */ - if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == 0x31140000 || log_info == 0x31130000)) return; @@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) { int i = 0; char desc[16]; - u8 revision; u32 iounit_pg1_flags; u32 bios_version; bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); - pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); strncpy(desc, ioc->manu_pg0.ChipName, 16); printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", @@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) (ioc->facts.FWVersion.Word & 
0x00FF0000) >> 16, (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, ioc->facts.FWVersion.Word & 0x000000FF, - revision, + ioc->pdev->revision, (bios_version & 0xFF000000) >> 24, (bios_version & 0x00FF0000) >> 16, (bios_version & 0x0000FF00) >> 8, diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 7fceb899029e..3b9a28efea82 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg) { struct mpt2_ioctl_iocinfo karg; struct MPT2SAS_ADAPTER *ioc; - u8 revision; if (copy_from_user(&karg, arg, sizeof(karg))) { printk(KERN_ERR "failure at %s:%d/%s()!\n", @@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg) karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; if (ioc->pfacts) karg.port_number = ioc->pfacts[0].PortNumber; - pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); - karg.hw_rev = revision; + karg.hw_rev = ioc->pdev->revision; karg.pci_id = ioc->pdev->device; karg.subsystem_device = ioc->pdev->subsystem_device; karg.subsystem_vendor = ioc->pdev->subsystem_vendor; diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 4b3b4755945c..5982a587babc 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -115,7 +115,6 @@ #include <asm/dma.h> #include <asm/io.h> -#include <asm/system.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 002924963cd8..62b616891a33 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -38,7 +38,6 @@ #include <linux/dma-mapping.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index de0b1a704fb5..21883a2d6324 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -54,7 +54,6 @@ static const char * osst_version = "0.99.4"; #include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/dma.h> -#include <asm/system.h> /* The driver prints some debugging information on the console if DEBUG is defined and non-zero. 
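The mpt2sas hunks above (and the qla4xxx hunk below) drop the explicit pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision) in favour of pdev->revision, which the PCI core fills in once at enumeration time. A minimal kernel-context sketch of the preferred form; my_probe is an illustrative name:

#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* No config-space access needed: the core cached the low byte of
	 * PCI_CLASS_REVISION in pdev->revision during enumeration. */
	dev_info(&pdev->dev, "revision-id=%#x\n", pdev->revision);
	return 0;
}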
*/ diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index f2018b46f494..2f72c9807b12 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -113,7 +113,6 @@ #include <linux/module.h> -#include <asm/system.h> #include <linux/signal.h> #include <linux/proc_fs.h> #include <asm/io.h> diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 3619f6eeeeda..9d82ee5c10de 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -2093,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) struct ata_task_resp *resp ; u32 *sata_resp; struct pm8001_device *pm8001_dev; + unsigned long flags; psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); @@ -2382,26 +2383,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->stat = SAS_DEV_NO_RESPONSE; break; } - spin_lock_irq(&t->task_state_lock); + spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; t->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with io_status 0x%x" " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else if (t->uldd_task) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* ditto */ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); } else if (!t->uldd_task) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); @@ -2423,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); + unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; @@ -2593,26 +2595,26 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->stat = SAS_OPEN_TO; break; } - spin_lock_irq(&t->task_state_lock); + spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; t->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with io_status 0x%x" " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, event, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else if (t->uldd_task) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* ditto */ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); } else if (!t->uldd_task) { - spin_unlock_irq(&t->task_state_lock); + spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); diff --git 
a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index d838205ab169..6c6486f626ee 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -359,7 +359,6 @@ #include <asm/byteorder.h> #include <asm/processor.h> #include <asm/types.h> -#include <asm/system.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 7c9f28b7da72..fc542a9bb106 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -431,9 +431,9 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha, mbox_sts_entry->out_mbox[6])); if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) - status = QLA_SUCCESS; + status = ISCSI_PING_SUCCESS; else - status = QLA_ERROR; + status = mbox_sts_entry->out_mbox[6]; data_size = sizeof(mbox_sts_entry->out_mbox); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 3d9419460e0c..ee47820c30a6 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -834,7 +834,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) static void qla4xxx_set_port_speed(struct Scsi_Host *shost) { struct scsi_qla_host *ha = to_qla_host(shost); - struct iscsi_cls_host *ihost = shost_priv(shost); + struct iscsi_cls_host *ihost = shost->shost_data; uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; qla4xxx_get_firmware_state(ha); @@ -859,7 +859,7 @@ static void qla4xxx_set_port_speed(struct Scsi_Host *shost) static void qla4xxx_set_port_state(struct Scsi_Host *shost) { struct scsi_qla_host *ha = to_qla_host(shost); - struct iscsi_cls_host *ihost = shost_priv(shost); + struct iscsi_cls_host *ihost = shost->shost_data; uint32_t state = ISCSI_PORT_STATE_DOWN; if (test_bit(AF_LINK_UP, &ha->flags)) @@ -3445,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) { int status = 0; - uint8_t revision_id; unsigned long mem_base, mem_len, db_base, db_len; struct pci_dev *pdev = ha->pdev; @@ -3457,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) goto iospace_error_exit; } - pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id); DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", - __func__, revision_id)); - ha->revision_id = revision_id; + __func__, pdev->revision)); + ha->revision_id = pdev->revision; /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index ede9af944141..97b30c108e36 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. 
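The qla4xxx port-speed and port-state hunks above replace shost_priv(shost) with shost->shost_data when looking up the iscsi_cls_host. The two point at different allocations: shost_priv() returns the LLD's hostdata area embedded in the Scsi_Host, while shost_data is the buffer the transport class sizes and allocates for itself at scsi_add_host() time, so the old code was casting the driver's own private data to the wrong type. A short sketch of the distinction, with my_lld_host standing in for the driver's structure:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_iscsi.h>

struct my_lld_host { int example_field; };	/* illustrative LLD private data */

static void show_host_areas(struct Scsi_Host *shost)
{
	struct my_lld_host *ha = shost_priv(shost);	   /* LLD hostdata area    */
	struct iscsi_cls_host *ihost = shost->shost_data;  /* transport-class area */

	(void)ha;
	(void)ihost;
}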
*/ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k15" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k16" diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e40dc1cb09a0..b191dd549207 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -35,7 +35,6 @@ #include "qlogicpti.h" #include <asm/dma.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/pgtable.h> #include <asm/oplib.h> diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 591856131c4e..182d5a57ab74 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324"; #define DEF_LBPU 0 #define DEF_LBPWS 0 #define DEF_LBPWS10 0 +#define DEF_LBPRZ 1 #define DEF_LOWEST_ALIGNED 0 #define DEF_NO_LUN_0 0 #define DEF_NUM_PARTS 0 @@ -186,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; static unsigned int scsi_debug_lbpu = DEF_LBPU; static unsigned int scsi_debug_lbpws = DEF_LBPWS; static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; +static unsigned int scsi_debug_lbprz = DEF_LBPRZ; static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; @@ -775,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr) return 0x3c; } -/* Thin provisioning VPD page (SBC-3) */ +/* Logical block provisioning VPD page (SBC-3) */ static int inquiry_evpd_b2(unsigned char *arr) { - memset(arr, 0, 0x8); + memset(arr, 0, 0x4); arr[0] = 0; /* threshold exponent */ if (scsi_debug_lbpu) @@ -790,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr) if (scsi_debug_lbpws10) arr[1] |= 1 << 5; - return 0x8; + if (scsi_debug_lbprz) + arr[1] |= 1 << 2; + + return 0x4; } #define SDEBUG_LONG_INQ_SZ 96 @@ -1071,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp, arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; - if (scsi_debug_lbp()) + if (scsi_debug_lbp()) { arr[14] |= 0x80; /* LBPME */ + if (scsi_debug_lbprz) + arr[14] |= 0x40; /* LBPRZ */ + } arr[15] = scsi_debug_lowest_aligned & 0xff; @@ -2046,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len) block = lba + alignment; rem = do_div(block, granularity); - if (rem == 0 && lba + granularity <= end && - block < map_size) + if (rem == 0 && lba + granularity <= end && block < map_size) { clear_bit(block, map_storep); - + if (scsi_debug_lbprz) + memset(fake_storep + + block * scsi_debug_sector_size, 0, + scsi_debug_sector_size); + } lba += granularity - rem; } } @@ -2731,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO); module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); +module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); @@ -2772,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 
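The scsi_debug hunks above add an lbprz module parameter: unmap_region() now zero-fills the backing store for any fully unmapped granule, and READ CAPACITY(16) sets the LBPRZ bit next to LBPME so initiators know unmapped blocks read back as zeroes. A compressed userspace-style sketch of the zero-on-unmap step, with store and sector_size standing in for the module's fake_storep and scsi_debug_sector_size globals:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static unsigned char *store;		/* stand-in for fake_storep            */
static unsigned int sector_size = 512;	/* stand-in for scsi_debug_sector_size */
static bool lbprz = true;		/* stand-in for scsi_debug_lbprz       */

static void unmap_block(unsigned long block)
{
	/* the real module also does clear_bit(block, map_storep) here */
	if (lbprz)
		memset(store + (size_t)block * sector_size, 0, sector_size);
}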
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index fac31730addf..1cf640e575da 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1486,7 +1486,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, struct iscsi_uevent *ev; int len = NLMSG_SPACE(sizeof(*ev) + data_size); - skb = alloc_skb(len, GFP_KERNEL); + skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", host_no, code); @@ -1504,7 +1504,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, if (data_size) memcpy((char *)ev + sizeof(*ev), data, data_size); - iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_post_host_event); @@ -1517,7 +1517,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, struct iscsi_uevent *ev; int len = NLMSG_SPACE(sizeof(*ev) + data_size); - skb = alloc_skb(len, GFP_KERNEL); + skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); return; @@ -1533,7 +1533,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, ev->r.ping_comp.data_size = data_size; memcpy((char *)ev + sizeof(*ev), data, data_size); - iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 09e3df42a402..5ba5c2a9e8e9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -664,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) } /** - * sd_init_command - build a scsi (read or write) command from + * sd_prep_fn - build a scsi (read or write) command from * information in the request structure. * @SCpnt: pointer to mid-level's per scsi command structure that * contains request and into which the scsi command is written @@ -711,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) ret = BLKPREP_KILL; SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, - "sd_init_command: block=%llu, " + "sd_prep_fn: block=%llu, " "count=%d\n", (unsigned long long)block, this_count)); @@ -1212,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { + retval = scsi_autopm_get_device(sdp); + if (retval) + goto out; + sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); + scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -2644,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie) * (e.g. /dev/sda). More precisely it is the block device major * and minor number that is chosen here. * - * Assume sd_attach is not re-entrant (for time being) - * Also think about sd_attach() and sd_remove() running coincidentally. + * Assume sd_probe is not re-entrant (for time being) + * Also think about sd_probe() and sd_remove() running coincidentally. 
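The scsi_transport_iscsi hunks above switch the host-event and ping-completion allocations from GFP_KERNEL to GFP_NOIO. These events can be posted while the iSCSI session is recovering, and a GFP_KERNEL allocation could wait on page reclaim that in turn needs the blocked storage path, so reclaim is restricted to not issue new I/O. A minimal kernel-context sketch; alloc_event_skb is an illustrative name:

#include <linux/gfp.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_event_skb(int len)
{
	/* GFP_NOIO: safe to call from storage error-recovery context,
	 * where GFP_KERNEL reclaim could deadlock on the blocked I/O path. */
	return alloc_skb(len, GFP_NOIO);
}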
**/ static int sd_probe(struct device *dev) { @@ -2660,7 +2665,7 @@ static int sd_probe(struct device *dev) goto out; SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, - "sd_attach\n")); + "sd_probe\n")); error = -ENOMEM; sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 9262cdfa4b23..e41998cb098e 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -42,7 +42,6 @@ static const char *verstr = "20101219"; #include <asm/uaccess.h> #include <asm/dma.h> -#include <asm/system.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> @@ -1106,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) STp->drv_buffer)); } STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; + if (!STp->drv_buffer && STp->immediate_filemark) { + printk(KERN_WARNING + "%s: non-buffered tape: disabling writing immediate filemarks\n", + name); + STp->immediate_filemark = 0; + } } st_release_request(SRpnt); SRpnt = NULL; @@ -1314,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id) memset(cmd, 0, MAX_COMMAND_SIZE); cmd[0] = WRITE_FILEMARKS; + if (STp->immediate_filemark) + cmd[1] = 1; cmd[4] = 1 + STp->two_fm; SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, @@ -2181,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, STp->scsi2_logical); printk(KERN_INFO - "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate, - STp->sili); + "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n", + name, STm->sysv, STp->immediate, STp->sili, + STp->immediate_filemark); printk(KERN_INFO "%s: debugging: %d\n", name, debugging); } @@ -2224,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options) STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; STp->immediate = (options & MT_ST_NOWAIT) != 0; + STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0; STm->sysv = (options & MT_ST_SYSV) != 0; STp->sili = (options & MT_ST_SILI) != 0; DEB( debugging = (options & MT_ST_DEBUGGING) != 0; @@ -2255,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options) STp->scsi2_logical = value; if ((options & MT_ST_NOWAIT) != 0) STp->immediate = value; + if ((options & MT_ST_NOWAIT_EOF) != 0) + STp->immediate_filemark = value; if ((options & MT_ST_SYSV) != 0) STm->sysv = value; if ((options & MT_ST_SILI) != 0) @@ -2714,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon cmd[0] = WRITE_FILEMARKS; if (cmd_in == MTWSM) cmd[1] = 2; - if (cmd_in == MTWEOFI) + if (cmd_in == MTWEOFI || + (cmd_in == MTWEOF && STp->immediate_filemark)) cmd[1] |= 1; cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); @@ -4093,6 +4105,7 @@ static int st_probe(struct device *dev) tpnt->scsi2_logical = ST_SCSI2LOGICAL; tpnt->sili = ST_SILI; tpnt->immediate = ST_NOWAIT; + tpnt->immediate_filemark = 0; tpnt->default_drvbuffer = 0xff; /* No forced buffering */ tpnt->partition = 0; tpnt->new_partition = 0; @@ -4478,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf) options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; options |= STm->sysv ? MT_ST_SYSV : 0; options |= STp->immediate ? MT_ST_NOWAIT : 0; + options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0; options |= STp->sili ? 
MT_ST_SILI : 0; l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index f91a67c6d968..ea35632b986c 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -120,6 +120,7 @@ struct scsi_tape { unsigned char c_algo; /* compression algorithm */ unsigned char pos_unknown; /* after reset position unknown */ unsigned char sili; /* use SILI when reading in variable b mode */ + unsigned char immediate_filemark; /* write filemark immediately */ int tape_type; int long_timeout; /* timeout for commands known to take long time */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index baf7328de956..6e25889db9d4 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -63,7 +63,6 @@ #include <linux/blkdev.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/sun3ints.h> #include <asm/dvma.h> diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index fbba78e5722e..a3dd55d1d2fd 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -25,7 +25,6 @@ #include <linux/blkdev.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/sun3ints.h> #include <asm/dvma.h> diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index 012c86edd59f..ac4eca6a5328 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c @@ -37,7 +37,6 @@ #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <asm/dma.h> -#include <asm/system.h> #include <asm/io.h> #include <linux/blkdev.h> #include <linux/isapnp.h> diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 041eaaace2c3..d672d97fb84a 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -106,7 +106,6 @@ * $Log: t128.c,v $ */ -#include <asm/system.h> #include <linux/signal.h> #include <linux/io.h> #include <linux/blkdev.h> diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 90e104d6b558..9c216e563568 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -410,7 +410,6 @@ #include <linux/ioport.h> #include <linux/delay.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/byteorder.h> #include <linux/proc_fs.h> #include <linux/blkdev.h> diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig new file mode 100644 index 000000000000..8f27f9d6f91d --- /dev/null +++ b/drivers/scsi/ufs/Kconfig @@ -0,0 +1,49 @@ +# +# Kernel configuration file for the UFS Host Controller +# +# This code is based on drivers/scsi/ufs/Kconfig +# Copyright (C) 2011 Samsung Samsung India Software Operations +# +# Santosh Yaraganavi <santosh.sy@samsung.com> +# Vinayak Holikatti <h.vinayak@samsung.com> + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# NO WARRANTY +# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is +# solely responsible for determining the appropriateness of using and +# distributing the Program and assumes all risks associated with its +# exercise of rights under this Agreement, including but not limited to +# the risks and costs of program errors, damage to or loss of data, +# programs or equipment, and unavailability or interruption of operations. + +# DISCLAIMER OF LIABILITY +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +# USA. + +config SCSI_UFSHCD + tristate "Universal Flash Storage host controller driver" + depends on PCI && SCSI + ---help--- + This is a generic driver which supports PCIe UFS Host controllers. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile new file mode 100644 index 000000000000..adf7895a6a91 --- /dev/null +++ b/drivers/scsi/ufs/Makefile @@ -0,0 +1,2 @@ +# UFSHCD makefile +obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h new file mode 100644 index 000000000000..b207529f8d54 --- /dev/null +++ b/drivers/scsi/ufs/ufs.h @@ -0,0 +1,207 @@ +/* + * Universal Flash Storage Host controller driver + * + * This code is based on drivers/scsi/ufs/ufs.h + * Copyright (C) 2011-2012 Samsung India Software Operations + * + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef _UFS_H +#define _UFS_H + +#define MAX_CDB_SIZE 16 + +#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ + ((byte3 << 24) | (byte2 << 16) |\ + (byte1 << 8) | (byte0)) + +/* + * UFS Protocol Information Unit related definitions + */ + +/* Task management functions */ +enum { + UFS_ABORT_TASK = 0x01, + UFS_ABORT_TASK_SET = 0x02, + UFS_CLEAR_TASK_SET = 0x04, + UFS_LOGICAL_RESET = 0x08, + UFS_QUERY_TASK = 0x80, + UFS_QUERY_TASK_SET = 0x81, +}; + +/* UTP UPIU Transaction Codes Initiator to Target */ +enum { + UPIU_TRANSACTION_NOP_OUT = 0x00, + UPIU_TRANSACTION_COMMAND = 0x01, + UPIU_TRANSACTION_DATA_OUT = 0x02, + UPIU_TRANSACTION_TASK_REQ = 0x04, + UPIU_TRANSACTION_QUERY_REQ = 0x26, +}; + +/* UTP UPIU Transaction Codes Target to Initiator */ +enum { + UPIU_TRANSACTION_NOP_IN = 0x20, + UPIU_TRANSACTION_RESPONSE = 0x21, + UPIU_TRANSACTION_DATA_IN = 0x22, + UPIU_TRANSACTION_TASK_RSP = 0x24, + UPIU_TRANSACTION_READY_XFER = 0x31, + UPIU_TRANSACTION_QUERY_RSP = 0x36, +}; + +/* UPIU Read/Write flags */ +enum { + UPIU_CMD_FLAGS_NONE = 0x00, + UPIU_CMD_FLAGS_WRITE = 0x20, + UPIU_CMD_FLAGS_READ = 0x40, +}; + +/* UPIU Task Attributes */ +enum { + UPIU_TASK_ATTR_SIMPLE = 0x00, + UPIU_TASK_ATTR_ORDERED = 0x01, + UPIU_TASK_ATTR_HEADQ = 0x02, + UPIU_TASK_ATTR_ACA = 0x03, +}; + +/* UTP QUERY Transaction Specific Fields OpCode */ +enum { + UPIU_QUERY_OPCODE_NOP = 0x0, + UPIU_QUERY_OPCODE_READ_DESC = 0x1, + UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, + UPIU_QUERY_OPCODE_READ_ATTR = 0x3, + UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4, + UPIU_QUERY_OPCODE_READ_FLAG = 0x5, + UPIU_QUERY_OPCODE_SET_FLAG = 0x6, + UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7, + UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, +}; + +/* UTP Transfer Request Command Type (CT) */ +enum { + UPIU_COMMAND_SET_TYPE_SCSI = 0x0, + UPIU_COMMAND_SET_TYPE_UFS = 0x1, + UPIU_COMMAND_SET_TYPE_QUERY = 0x2, +}; + +enum { + MASK_SCSI_STATUS = 0xFF, + MASK_TASK_RESPONSE = 0xFF00, + MASK_RSP_UPIU_RESULT = 0xFFFF, +}; + +/* Task management service response */ +enum { + UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00, + UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04, + UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08, + UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, + UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, +}; +/** + * struct utp_upiu_header - UPIU header structure + * @dword_0: UPIU header DW-0 + * @dword_1: UPIU header DW-1 + * @dword_2: UPIU header DW-2 + */ +struct utp_upiu_header { + u32 dword_0; + u32 dword_1; + u32 dword_2; +}; + +/** + * struct utp_upiu_cmd - Command UPIU structure + * @header: UPIU header structure DW-0 to DW-2 + * @data_transfer_len: Data Transfer Length DW-3 + * @cdb: Command Descriptor Block CDB DW-4 to DW-7 + */ +struct utp_upiu_cmd { + struct utp_upiu_header header; + u32 exp_data_transfer_len; + u8 
cdb[MAX_CDB_SIZE]; +}; + +/** + * struct utp_upiu_rsp - Response UPIU structure + * @header: UPIU header DW-0 to DW-2 + * @residual_transfer_count: Residual transfer count DW-3 + * @reserved: Reserved double words DW-4 to DW-7 + * @sense_data_len: Sense data length DW-8 U16 + * @sense_data: Sense data field DW-8 to DW-12 + */ +struct utp_upiu_rsp { + struct utp_upiu_header header; + u32 residual_transfer_count; + u32 reserved[4]; + u16 sense_data_len; + u8 sense_data[18]; +}; + +/** + * struct utp_upiu_task_req - Task request UPIU structure + * @header - UPIU header structure DW0 to DW-2 + * @input_param1: Input parameter 1 DW-3 + * @input_param2: Input parameter 2 DW-4 + * @input_param3: Input parameter 3 DW-5 + * @reserved: Reserved double words DW-6 to DW-7 + */ +struct utp_upiu_task_req { + struct utp_upiu_header header; + u32 input_param1; + u32 input_param2; + u32 input_param3; + u32 reserved[2]; +}; + +/** + * struct utp_upiu_task_rsp - Task Management Response UPIU structure + * @header: UPIU header structure DW0-DW-2 + * @output_param1: Ouput parameter 1 DW3 + * @output_param2: Output parameter 2 DW4 + * @reserved: Reserved double words DW-5 to DW-7 + */ +struct utp_upiu_task_rsp { + struct utp_upiu_header header; + u32 output_param1; + u32 output_param2; + u32 reserved[3]; +}; + +#endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c new file mode 100644 index 000000000000..52b96e8bf92e --- /dev/null +++ b/drivers/scsi/ufs/ufshcd.c @@ -0,0 +1,1978 @@ +/* + * Universal Flash Storage Host controller driver + * + * This code is based on drivers/scsi/ufs/ufshcd.c + * Copyright (C) 2011-2012 Samsung India Software Operations + * + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
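The new ufs.h above supplies the UPIU transaction codes, flag values, and a UPIU_HEADER_DWORD() macro that packs four bytes into DW-0 of struct utp_upiu_header. As a hedged, kernel-context illustration of how those pieces might fit together, a command UPIU header for a read could be composed as below; the comments simply name the macro's byte3..byte0 argument slots, byte-order handling is omitted, and fill_cmd_upiu_header is not a function from the driver:

/* Assumes the ufs.h added above (and linux/types.h) is included. */
static void fill_cmd_upiu_header(struct utp_upiu_cmd *ucd, u8 lun, u8 task_tag)
{
	ucd->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_COMMAND,	/* byte3 slot: transaction */
			UPIU_CMD_FLAGS_READ,		/* byte2 slot: flags       */
			lun,				/* byte1 slot: LUN         */
			task_tag);			/* byte0 slot: task tag    */
	ucd->header.dword_1 = 0;
	ucd->header.dword_2 = 0;
}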
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <linux/bitops.h> + +#include <asm/irq.h> +#include <asm/byteorder.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_eh.h> + +#include "ufs.h" +#include "ufshci.h" + +#define UFSHCD "ufshcd" +#define UFSHCD_DRIVER_VERSION "0.1" + +enum { + UFSHCD_MAX_CHANNEL = 0, + UFSHCD_MAX_ID = 1, + UFSHCD_MAX_LUNS = 8, + UFSHCD_CMD_PER_LUN = 32, + UFSHCD_CAN_QUEUE = 32, +}; + +/* UFSHCD states */ +enum { + UFSHCD_STATE_OPERATIONAL, + UFSHCD_STATE_RESET, + UFSHCD_STATE_ERROR, +}; + +/* Interrupt configuration options */ +enum { + UFSHCD_INT_DISABLE, + UFSHCD_INT_ENABLE, + UFSHCD_INT_CLEAR, +}; + +/* Interrupt aggregation options */ +enum { + INT_AGGR_RESET, + INT_AGGR_CONFIG, +}; + +/** + * struct uic_command - UIC command structure + * @command: UIC command + * @argument1: UIC command argument 1 + * @argument2: UIC command argument 2 + * @argument3: UIC command argument 3 + * @cmd_active: Indicate if UIC command is outstanding + * @result: UIC command result + */ +struct uic_command { + u32 command; + u32 argument1; + u32 argument2; + u32 argument3; + int cmd_active; + int result; +}; + +/** + * struct ufs_hba - per adapter private structure + * @mmio_base: UFSHCI base register address + * @ucdl_base_addr: UFS Command Descriptor base address + * @utrdl_base_addr: UTP Transfer Request Descriptor base address + * @utmrdl_base_addr: UTP Task Management Descriptor base address + * @ucdl_dma_addr: UFS Command Descriptor DMA address + * @utrdl_dma_addr: UTRDL DMA address + * @utmrdl_dma_addr: UTMRDL DMA address + * @host: Scsi_Host instance of the driver + * @pdev: PCI device handle + * @lrb: local reference block + * @outstanding_tasks: Bits representing outstanding task requests + * @outstanding_reqs: Bits representing outstanding transfer requests + * @capabilities: UFS Controller Capabilities + * @nutrs: Transfer Request Queue depth supported by controller + * @nutmrs: Task Management Queue depth supported by controller + * @active_uic_cmd: handle of active UIC command + * @ufshcd_tm_wait_queue: wait queue for task management + * @tm_condition: condition variable for task management + * @ufshcd_state: UFSHCD states + * @int_enable_mask: Interrupt Mask Bits + * @uic_workq: Work queue for UIC completion handling + * @feh_workq: Work queue for fatal controller error handling + * @errors: HBA 
errors + */ +struct ufs_hba { + void __iomem *mmio_base; + + /* Virtual memory reference */ + struct utp_transfer_cmd_desc *ucdl_base_addr; + struct utp_transfer_req_desc *utrdl_base_addr; + struct utp_task_req_desc *utmrdl_base_addr; + + /* DMA memory reference */ + dma_addr_t ucdl_dma_addr; + dma_addr_t utrdl_dma_addr; + dma_addr_t utmrdl_dma_addr; + + struct Scsi_Host *host; + struct pci_dev *pdev; + + struct ufshcd_lrb *lrb; + + unsigned long outstanding_tasks; + unsigned long outstanding_reqs; + + u32 capabilities; + int nutrs; + int nutmrs; + u32 ufs_version; + + struct uic_command active_uic_cmd; + wait_queue_head_t ufshcd_tm_wait_queue; + unsigned long tm_condition; + + u32 ufshcd_state; + u32 int_enable_mask; + + /* Work Queues */ + struct work_struct uic_workq; + struct work_struct feh_workq; + + /* HBA Errors */ + u32 errors; +}; + +/** + * struct ufshcd_lrb - local reference block + * @utr_descriptor_ptr: UTRD address of the command + * @ucd_cmd_ptr: UCD address of the command + * @ucd_rsp_ptr: Response UPIU address for this command + * @ucd_prdt_ptr: PRDT address of the command + * @cmd: pointer to SCSI command + * @sense_buffer: pointer to sense buffer address of the SCSI command + * @sense_bufflen: Length of the sense buffer + * @scsi_status: SCSI status of the command + * @command_type: SCSI, UFS, Query. + * @task_tag: Task tag of the command + * @lun: LUN of the command + */ +struct ufshcd_lrb { + struct utp_transfer_req_desc *utr_descriptor_ptr; + struct utp_upiu_cmd *ucd_cmd_ptr; + struct utp_upiu_rsp *ucd_rsp_ptr; + struct ufshcd_sg_entry *ucd_prdt_ptr; + + struct scsi_cmnd *cmd; + u8 *sense_buffer; + unsigned int sense_bufflen; + int scsi_status; + + int command_type; + int task_tag; + unsigned int lun; +}; + +/** + * ufshcd_get_ufs_version - Get the UFS version supported by the HBA + * @hba - Pointer to adapter instance + * + * Returns UFSHCI version supported by the controller + */ +static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) +{ + return readl(hba->mmio_base + REG_UFS_VERSION); +} + +/** + * ufshcd_is_device_present - Check if any device connected to + * the host controller + * @reg_hcs - host controller status register value + * + * Returns 0 if device present, non-zero if no device detected + */ +static inline int ufshcd_is_device_present(u32 reg_hcs) +{ + return (DEVICE_PRESENT & reg_hcs) ? 
0 : -1; +} + +/** + * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status + * @lrb: pointer to local command reference block + * + * This function is used to get the OCS field from UTRD + * Returns the OCS field in the UTRD + */ +static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) +{ + return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; +} + +/** + * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status + * @task_req_descp: pointer to utp_task_req_desc structure + * + * This function is used to get the OCS field from UTMRD + * Returns the OCS field in the UTMRD + */ +static inline int +ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) +{ + return task_req_descp->header.dword_2 & MASK_OCS; +} + +/** + * ufshcd_get_tm_free_slot - get a free slot for task management request + * @hba: per adapter instance + * + * Returns maximum number of task management request slots in case of + * task management queue full or returns the free slot number + */ +static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) +{ + return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); +} + +/** + * ufshcd_utrl_clear - Clear a bit in UTRLCLR register + * @hba: per adapter instance + * @pos: position of the bit to be cleared + */ +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) +{ + writel(~(1 << pos), + (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR)); +} + +/** + * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY + * @reg: Register value of host controller status + * + * Returns integer, 0 on Success and positive value if failed + */ +static inline int ufshcd_get_lists_status(u32 reg) +{ + /* + * The mask 0xFF is for the following HCS register bits + * Bit Description + * 0 Device Present + * 1 UTRLRDY + * 2 UTMRLRDY + * 3 UCRDY + * 4 HEI + * 5 DEI + * 6-7 reserved + */ + return (((reg) & (0xFF)) >> 1) ^ (0x07); +} + +/** + * ufshcd_get_uic_cmd_result - Get the UIC command result + * @hba: Pointer to adapter instance + * + * This function gets the result of UIC command completion + * Returns 0 on success, non zero value on error + */ +static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) +{ + return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) & + MASK_UIC_COMMAND_RESULT; +} + +/** + * ufshcd_free_hba_memory - Free allocated memory for LRB, request + * and task lists + * @hba: Pointer to adapter instance + */ +static inline void ufshcd_free_hba_memory(struct ufs_hba *hba) +{ + size_t utmrdl_size, utrdl_size, ucdl_size; + + kfree(hba->lrb); + + if (hba->utmrdl_base_addr) { + utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; + dma_free_coherent(&hba->pdev->dev, utmrdl_size, + hba->utmrdl_base_addr, hba->utmrdl_dma_addr); + } + + if (hba->utrdl_base_addr) { + utrdl_size = + (sizeof(struct utp_transfer_req_desc) * hba->nutrs); + dma_free_coherent(&hba->pdev->dev, utrdl_size, + hba->utrdl_base_addr, hba->utrdl_dma_addr); + } + + if (hba->ucdl_base_addr) { + ucdl_size = + (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); + dma_free_coherent(&hba->pdev->dev, ucdl_size, + hba->ucdl_base_addr, hba->ucdl_dma_addr); + } +} + +/** + * ufshcd_is_valid_req_rsp - checks if controller TR response is valid + * @ucd_rsp_ptr: pointer to response UPIU + * + * This function checks the response UPIU for valid transaction type in + * response field + * Returns 0 on success, non-zero on failure + */ +static inline int +ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) 
>> 24) == + UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16; +} + +/** + * ufshcd_get_rsp_upiu_result - Get the result from response UPIU + * @ucd_rsp_ptr: pointer to response UPIU + * + * This function gets the response status and scsi_status from response UPIU + * Returns the response result code. + */ +static inline int +ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; +} + +/** + * ufshcd_config_int_aggr - Configure interrupt aggregation values. + * Currently there is no use case where we want to configure + * interrupt aggregation dynamically. So to configure interrupt + * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and + * INT_AGGR_TIMEOUT_VALUE are used. + * @hba: per adapter instance + * @option: Interrupt aggregation option + */ +static inline void +ufshcd_config_int_aggr(struct ufs_hba *hba, int option) +{ + switch (option) { + case INT_AGGR_RESET: + writel((INT_AGGR_ENABLE | + INT_AGGR_COUNTER_AND_TIMER_RESET), + (hba->mmio_base + + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); + break; + case INT_AGGR_CONFIG: + writel((INT_AGGR_ENABLE | + INT_AGGR_PARAM_WRITE | + INT_AGGR_COUNTER_THRESHOLD_VALUE | + INT_AGGR_TIMEOUT_VALUE), + (hba->mmio_base + + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); + break; + } +} + +/** + * ufshcd_enable_run_stop_reg - Enable run-stop registers, + * When run-stop registers are set to 1, it indicates the + * host controller that it can process the requests + * @hba: per adapter instance + */ +static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) +{ + writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT, + (hba->mmio_base + + REG_UTP_TASK_REQ_LIST_RUN_STOP)); + writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, + (hba->mmio_base + + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP)); +} + +/** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba) +{ + writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE)); +} + +/** + * ufshcd_hba_start - Start controller initialization sequence + * @hba: per adapter instance + */ +static inline void ufshcd_hba_start(struct ufs_hba *hba) +{ + writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE)); +} + +/** + * ufshcd_is_hba_active - Get controller state + * @hba: per adapter instance + * + * Returns zero if controller is active, 1 otherwise + */ +static inline int ufshcd_is_hba_active(struct ufs_hba *hba) +{ + return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; +} + +/** + * ufshcd_send_command - Send SCSI or device management commands + * @hba: per adapter instance + * @task_tag: Task tag of the command + */ +static inline +void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) +{ + __set_bit(task_tag, &hba->outstanding_reqs); + writel((1 << task_tag), + (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL)); +} + +/** + * ufshcd_copy_sense_data - Copy sense data in case of check condition + * @lrb - pointer to local reference block + */ +static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) +{ + int len; + if (lrbp->sense_buffer) { + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len); + memcpy(lrbp->sense_buffer, + lrbp->ucd_rsp_ptr->sense_data, + min_t(int, len, SCSI_SENSE_BUFFERSIZE)); + } +} + +/** + * ufshcd_hba_capabilities - Read controller capabilities + * @hba: per adapter instance + */ +static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) +{ + hba->capabilities = + readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES); + + /* nutrs and nutmrs are 0 based values */ + hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; + hba->nutmrs = + ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; +} + +/** + * ufshcd_send_uic_command - Send UIC commands to unipro layers + * @hba: per adapter instance + * @uic_command: UIC command + */ +static inline void +ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd) +{ + /* Write Args */ + writel(uic_cmnd->argument1, + (hba->mmio_base + REG_UIC_COMMAND_ARG_1)); + writel(uic_cmnd->argument2, + (hba->mmio_base + REG_UIC_COMMAND_ARG_2)); + writel(uic_cmnd->argument3, + (hba->mmio_base + REG_UIC_COMMAND_ARG_3)); + + /* Write UIC Cmd */ + writel((uic_cmnd->command & COMMAND_OPCODE_MASK), + (hba->mmio_base + REG_UIC_COMMAND)); +} + +/** + * ufshcd_map_sg - Map scatter-gather list to prdt + * @lrbp - pointer to local reference block + * + * Returns 0 in case of success, non-zero value in case of failure + */ +static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) +{ + struct ufshcd_sg_entry *prd_table; + struct scatterlist *sg; + struct scsi_cmnd *cmd; + int sg_segments; + int i; + + cmd = lrbp->cmd; + sg_segments = scsi_dma_map(cmd); + if (sg_segments < 0) + return sg_segments; + + if (sg_segments) { + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((u16) (sg_segments)); + + prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; + + scsi_for_each_sg(cmd, sg, sg_segments, i) { + prd_table[i].size = + cpu_to_le32(((u32) sg_dma_len(sg))-1); + prd_table[i].base_addr = + cpu_to_le32(lower_32_bits(sg->dma_address)); + prd_table[i].upper_addr = + cpu_to_le32(upper_32_bits(sg->dma_address)); + } + } else { + lrbp->utr_descriptor_ptr->prd_table_length = 0; + } + + return 0; +} + +/** + * ufshcd_int_config - enable/disable interrupts + * @hba: per adapter instance + * @option: interrupt option + */ +static void ufshcd_int_config(struct ufs_hba *hba, u32 option) +{ + switch (option) { + case UFSHCD_INT_ENABLE: + writel(hba->int_enable_mask, + (hba->mmio_base + REG_INTERRUPT_ENABLE)); + break; + case UFSHCD_INT_DISABLE: + if (hba->ufs_version == UFSHCI_VERSION_10) + writel(INTERRUPT_DISABLE_MASK_10, + (hba->mmio_base + REG_INTERRUPT_ENABLE)); + else + writel(INTERRUPT_DISABLE_MASK_11, + (hba->mmio_base + REG_INTERRUPT_ENABLE)); + break; + } +} + +/** + * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) + * @lrb - pointer to local reference block + */ +static void ufshcd_compose_upiu(struct ufshcd_lrb 
*lrbp) +{ + struct utp_transfer_req_desc *req_desc; + struct utp_upiu_cmd *ucd_cmd_ptr; + u32 data_direction; + u32 upiu_flags; + + ucd_cmd_ptr = lrbp->ucd_cmd_ptr; + req_desc = lrbp->utr_descriptor_ptr; + + switch (lrbp->command_type) { + case UTP_CMD_TYPE_SCSI: + if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) { + data_direction = UTP_DEVICE_TO_HOST; + upiu_flags = UPIU_CMD_FLAGS_READ; + } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) { + data_direction = UTP_HOST_TO_DEVICE; + upiu_flags = UPIU_CMD_FLAGS_WRITE; + } else { + data_direction = UTP_NO_DATA_TRANSFER; + upiu_flags = UPIU_CMD_FLAGS_NONE; + } + + /* Transfer request descriptor header fields */ + req_desc->header.dword_0 = + cpu_to_le32(data_direction | UTP_SCSI_COMMAND); + + /* + * assigning invalid value for command status. Controller + * updates OCS on command completion, with the command + * status + */ + req_desc->header.dword_2 = + cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + + /* command descriptor fields */ + ucd_cmd_ptr->header.dword_0 = + cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, + upiu_flags, + lrbp->lun, + lrbp->task_tag)); + ucd_cmd_ptr->header.dword_1 = + cpu_to_be32( + UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, + 0, + 0, + 0)); + + /* Total EHS length and Data segment length will be zero */ + ucd_cmd_ptr->header.dword_2 = 0; + + ucd_cmd_ptr->exp_data_transfer_len = + cpu_to_be32(lrbp->cmd->transfersize); + + memcpy(ucd_cmd_ptr->cdb, + lrbp->cmd->cmnd, + (min_t(unsigned short, + lrbp->cmd->cmd_len, + MAX_CDB_SIZE))); + break; + case UTP_CMD_TYPE_DEV_MANAGE: + /* For query function implementation */ + break; + case UTP_CMD_TYPE_UFS: + /* For UFS native command implementation */ + break; + } /* end of switch */ +} + +/** + * ufshcd_queuecommand - main entry point for SCSI requests + * @cmd: command from SCSI Midlayer + * @done: call back function + * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + struct ufshcd_lrb *lrbp; + struct ufs_hba *hba; + unsigned long flags; + int tag; + int err = 0; + + hba = shost_priv(host); + + tag = cmd->request->tag; + + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + lrbp = &hba->lrb[tag]; + + lrbp->cmd = cmd; + lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; + lrbp->sense_buffer = cmd->sense_buffer; + lrbp->task_tag = tag; + lrbp->lun = cmd->device->lun; + + lrbp->command_type = UTP_CMD_TYPE_SCSI; + + /* form UPIU before issuing the command */ + ufshcd_compose_upiu(lrbp); + err = ufshcd_map_sg(lrbp); + if (err) + goto out; + + /* issue command to the controller */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_send_command(hba, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); +out: + return err; +} + +/** + * ufshcd_memory_alloc - allocate memory for host memory space data structures + * @hba: per adapter instance + * + * 1. Allocate DMA memory for Command Descriptor array + * Each command descriptor consist of Command UPIU, Response UPIU and PRDT + * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). + * 3. Allocate DMA memory for UTP Task Management Request Descriptor List + * (UTMRDL) + * 4. Allocate memory for local reference block(lrb). 
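+ * (Illustrative sizing, assuming the controller reports 32 transfer-request
+ * slots and 8 task-management slots: steps 1 and 2 allocate 32 command
+ * descriptors and 32 UTRDs, step 3 allocates 8 UTMRDs, and step 4 kcalloc()s
+ * 32 struct ufshcd_lrb entries.)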
+ * + * Returns 0 for success, non-zero in case of failure + */ +static int ufshcd_memory_alloc(struct ufs_hba *hba) +{ + size_t utmrdl_size, utrdl_size, ucdl_size; + + /* Allocate memory for UTP command descriptors */ + ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); + hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, + ucdl_size, + &hba->ucdl_dma_addr, + GFP_KERNEL); + + /* + * UFSHCI requires UTP command descriptor to be 128 byte aligned. + * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE + * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will + * be aligned to 128 bytes as well + */ + if (!hba->ucdl_base_addr || + WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(&hba->pdev->dev, + "Command Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Transfer descriptors + * UFSHCI requires 1024 byte alignment of UTRD + */ + utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); + hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, + utrdl_size, + &hba->utrdl_dma_addr, + GFP_KERNEL); + if (!hba->utrdl_base_addr || + WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(&hba->pdev->dev, + "Transfer Descriptor Memory allocation failed\n"); + goto out; + } + + /* + * Allocate memory for UTP Task Management descriptors + * UFSHCI requires 1024 byte alignment of UTMRD + */ + utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; + hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, + utmrdl_size, + &hba->utmrdl_dma_addr, + GFP_KERNEL); + if (!hba->utmrdl_base_addr || + WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { + dev_err(&hba->pdev->dev, + "Task Management Descriptor Memory allocation failed\n"); + goto out; + } + + /* Allocate memory for local reference block */ + hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL); + if (!hba->lrb) { + dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n"); + goto out; + } + return 0; +out: + ufshcd_free_hba_memory(hba); + return -ENOMEM; +} + +/** + * ufshcd_host_memory_configure - configure local reference block with + * memory offsets + * @hba: per adapter instance + * + * Configure Host memory space + * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA + * address. + * 2. Update each UTRD with Response UPIU offset, Response UPIU length + * and PRDT offset. + * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT + * into local reference block. 
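+ * (The response UPIU and PRD table offsets are programmed in double-word
+ * units, which is why each UTRD below stores response_offset >> 2 and
+ * prdt_offset >> 2.)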
+ */ +static void ufshcd_host_memory_configure(struct ufs_hba *hba) +{ + struct utp_transfer_cmd_desc *cmd_descp; + struct utp_transfer_req_desc *utrdlp; + dma_addr_t cmd_desc_dma_addr; + dma_addr_t cmd_desc_element_addr; + u16 response_offset; + u16 prdt_offset; + int cmd_desc_size; + int i; + + utrdlp = hba->utrdl_base_addr; + cmd_descp = hba->ucdl_base_addr; + + response_offset = + offsetof(struct utp_transfer_cmd_desc, response_upiu); + prdt_offset = + offsetof(struct utp_transfer_cmd_desc, prd_table); + + cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); + cmd_desc_dma_addr = hba->ucdl_dma_addr; + + for (i = 0; i < hba->nutrs; i++) { + /* Configure UTRD with command descriptor base address */ + cmd_desc_element_addr = + (cmd_desc_dma_addr + (cmd_desc_size * i)); + utrdlp[i].command_desc_base_addr_lo = + cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); + utrdlp[i].command_desc_base_addr_hi = + cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); + + /* Response upiu and prdt offset should be in double words */ + utrdlp[i].response_upiu_offset = + cpu_to_le16((response_offset >> 2)); + utrdlp[i].prd_table_offset = + cpu_to_le16((prdt_offset >> 2)); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE); + + hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); + hba->lrb[i].ucd_cmd_ptr = + (struct utp_upiu_cmd *)(cmd_descp + i); + hba->lrb[i].ucd_rsp_ptr = + (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; + hba->lrb[i].ucd_prdt_ptr = + (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; + } +} + +/** + * ufshcd_dme_link_startup - Notify Unipro to perform link startup + * @hba: per adapter instance + * + * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, + * in order to initialize the Unipro link startup procedure. + * Once the Unipro links are up, the device connected to the controller + * is detected. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_link_startup(struct ufs_hba *hba) +{ + struct uic_command *uic_cmd; + unsigned long flags; + + /* check if controller is ready to accept UIC commands */ + if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) & + UIC_COMMAND_READY) == 0x0) { + dev_err(&hba->pdev->dev, + "Controller not ready" + " to accept UIC commands\n"); + return -EIO; + } + + spin_lock_irqsave(hba->host->host_lock, flags); + + /* form UIC command */ + uic_cmd = &hba->active_uic_cmd; + uic_cmd->command = UIC_CMD_DME_LINK_STARTUP; + uic_cmd->argument1 = 0; + uic_cmd->argument2 = 0; + uic_cmd->argument3 = 0; + + /* enable UIC related interrupts */ + hba->int_enable_mask |= UIC_COMMAND_COMPL; + ufshcd_int_config(hba, UFSHCD_INT_ENABLE); + + /* sending UIC commands to controller */ + ufshcd_send_uic_command(hba, uic_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +/** + * ufshcd_make_hba_operational - Make UFS controller operational + * @hba: per adapter instance + * + * To bring UFS host controller to operational state, + * 1. Check if device is present + * 2. Configure run-stop-registers + * 3. Enable required interrupts + * 4. 
Configure interrupt aggregation + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_make_hba_operational(struct ufs_hba *hba) +{ + int err = 0; + u32 reg; + + /* check if device present */ + reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); + if (ufshcd_is_device_present(reg)) { + dev_err(&hba->pdev->dev, "cc: Device not present\n"); + err = -ENXIO; + goto out; + } + + /* + * UCRDY, UTMRLDY and UTRLRDY bits must be 1 + * DEI, HEI bits must be 0 + */ + if (!(ufshcd_get_lists_status(reg))) { + ufshcd_enable_run_stop_reg(hba); + } else { + dev_err(&hba->pdev->dev, + "Host controller not ready to process requests"); + err = -EIO; + goto out; + } + + /* Enable required interrupts */ + hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL | + UIC_ERROR | + UTP_TASK_REQ_COMPL | + DEVICE_FATAL_ERROR | + CONTROLLER_FATAL_ERROR | + SYSTEM_BUS_FATAL_ERROR); + ufshcd_int_config(hba, UFSHCD_INT_ENABLE); + + /* Configure interrupt aggregation */ + ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); + + if (hba->ufshcd_state == UFSHCD_STATE_RESET) + scsi_unblock_requests(hba->host); + + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + scsi_scan_host(hba->host); +out: + return err; +} + +/** + * ufshcd_hba_enable - initialize the controller + * @hba: per adapter instance + * + * The controller resets itself and controller firmware initialization + * sequence kicks off. When controller is ready it will set + * the Host Controller Enable bit to 1. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int retry; + + /* + * msleep of 1 and 5 used in this function might result in msleep(20), + * but it was necessary to send the UFS FPGA to reset mode during + * development and testing of this driver. msleep can be changed to + * mdelay and retry count can be reduced based on the controller. + */ + if (!ufshcd_is_hba_active(hba)) { + + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); + + /* + * This delay is based on the testing done with UFS host + * controller FPGA. The delay can be changed based on the + * host controller used. + */ + msleep(5); + } + + /* start controller initialization sequence */ + ufshcd_hba_start(hba); + + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. + */ + msleep(1); + + /* wait for the host controller to complete initialization */ + retry = 10; + while (ufshcd_is_hba_active(hba)) { + if (retry) { + retry--; + } else { + dev_err(&hba->pdev->dev, + "Controller enable failed\n"); + return -EIO; + } + msleep(5); + } + return 0; +} + +/** + * ufshcd_initialize_hba - start the initialization process + * @hba: per adapter instance + * + * 1. Enable the controller via ufshcd_hba_enable. + * 2. Program the Transfer Request List Address with the starting address of + * UTRDL. + * 3. Program the Task Management Request List Address with starting address + * of UTMRDL. + * + * Returns 0 on success, non-zero value on failure. 
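+ * (Step 1 relies on ufshcd_hba_enable() below; after the list base registers
+ * are programmed, link startup is requested via ufshcd_dme_link_startup(),
+ * and its completion is handled later by ufshcd_uic_cc_handler().)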
+ */ +static int ufshcd_initialize_hba(struct ufs_hba *hba) +{ + if (ufshcd_hba_enable(hba)) + return -EIO; + + /* Configure UTRL and UTMRL base address registers */ + writel(hba->utrdl_dma_addr, + (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L)); + writel(lower_32_bits(hba->utrdl_dma_addr), + (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H)); + writel(hba->utmrdl_dma_addr, + (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L)); + writel(upper_32_bits(hba->utmrdl_dma_addr), + (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H)); + + /* Initialize unipro link startup procedure */ + return ufshcd_dme_link_startup(hba); +} + +/** + * ufshcd_do_reset - reset the host controller + * @hba: per adapter instance + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_do_reset(struct ufs_hba *hba) +{ + struct ufshcd_lrb *lrbp; + unsigned long flags; + int tag; + + /* block commands from midlayer */ + scsi_block_requests(hba->host); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->ufshcd_state = UFSHCD_STATE_RESET; + + /* send controller to reset state */ + ufshcd_hba_stop(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* abort outstanding commands */ + for (tag = 0; tag < hba->nutrs; tag++) { + if (test_bit(tag, &hba->outstanding_reqs)) { + lrbp = &hba->lrb[tag]; + scsi_dma_unmap(lrbp->cmd); + lrbp->cmd->result = DID_RESET << 16; + lrbp->cmd->scsi_done(lrbp->cmd); + lrbp->cmd = NULL; + } + } + + /* clear outstanding request/task bit maps */ + hba->outstanding_reqs = 0; + hba->outstanding_tasks = 0; + + /* start the initialization process */ + if (ufshcd_initialize_hba(hba)) { + dev_err(&hba->pdev->dev, + "Reset: Controller initialization failed\n"); + return FAILED; + } + return SUCCESS; +} + +/** + * ufshcd_slave_alloc - handle initial SCSI device configurations + * @sdev: pointer to SCSI device + * + * Returns success + */ +static int ufshcd_slave_alloc(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + sdev->tagged_supported = 1; + + /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ + sdev->use_10_for_ms = 1; + scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); + + /* + * Inform SCSI Midlayer that the LUN queue depth is same as the + * controller queue depth. If a LUN queue depth is less than the + * controller queue depth and if the LUN reports + * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted + * with scsi_adjust_queue_depth. 
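+ * (For example, a controller advertising 32 transfer-request slots starts
+ * every LUN at a queue depth of 32 via scsi_activate_tcq() below;
+ * ufshcd_adjust_lun_qdepth() later lowers that depth if a LUN returns
+ * SAM_STAT_TASK_SET_FULL.)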
+ */ + scsi_activate_tcq(sdev, hba->nutrs); + return 0; +} + +/** + * ufshcd_slave_destroy - remove SCSI device configurations + * @sdev: pointer to SCSI device + */ +static void ufshcd_slave_destroy(struct scsi_device *sdev) +{ + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + scsi_deactivate_tcq(sdev, hba->nutrs); +} + +/** + * ufshcd_task_req_compl - handle task management request completion + * @hba: per adapter instance + * @index: index of the completed request + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) +{ + struct utp_task_req_desc *task_req_descp; + struct utp_upiu_task_rsp *task_rsp_upiup; + unsigned long flags; + int ocs_value; + int task_result; + + spin_lock_irqsave(hba->host->host_lock, flags); + + /* Clear completed tasks from outstanding_tasks */ + __clear_bit(index, &hba->outstanding_tasks); + + task_req_descp = hba->utmrdl_base_addr; + ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]); + + if (ocs_value == OCS_SUCCESS) { + task_rsp_upiup = (struct utp_upiu_task_rsp *) + task_req_descp[index].task_rsp_upiu; + task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); + task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); + + if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL || + task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) + task_result = FAILED; + } else { + task_result = FAILED; + dev_err(&hba->pdev->dev, + "trc: Invalid ocs = %x\n", ocs_value); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return task_result; +} + +/** + * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with + * SAM_STAT_TASK_SET_FULL SCSI command status. + * @cmd: pointer to SCSI command + */ +static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) +{ + struct ufs_hba *hba; + int i; + int lun_qdepth = 0; + + hba = shost_priv(cmd->device->host); + + /* + * LUN queue depth can be obtained by counting outstanding commands + * on the LUN. + */ + for (i = 0; i < hba->nutrs; i++) { + if (test_bit(i, &hba->outstanding_reqs)) { + + /* + * Check if the outstanding command belongs + * to the LUN which reported SAM_STAT_TASK_SET_FULL. + */ + if (cmd->device->lun == hba->lrb[i].lun) + lun_qdepth++; + } + } + + /* + * LUN queue depth will be total outstanding commands, except the + * command for which the LUN reported SAM_STAT_TASK_SET_FULL. + */ + scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); +} + +/** + * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status + * @lrb: pointer to local reference block of completed command + * @scsi_status: SCSI command status + * + * Returns value base on SCSI command status + */ +static inline int +ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) +{ + int result = 0; + + switch (scsi_status) { + case SAM_STAT_GOOD: + result |= DID_OK << 16 | + COMMAND_COMPLETE << 8 | + SAM_STAT_GOOD; + break; + case SAM_STAT_CHECK_CONDITION: + result |= DID_OK << 16 | + COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + ufshcd_copy_sense_data(lrbp); + break; + case SAM_STAT_BUSY: + result |= SAM_STAT_BUSY; + break; + case SAM_STAT_TASK_SET_FULL: + + /* + * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue + * depth needs to be adjusted to the exact number of + * outstanding commands the LUN can handle at any given time. 
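+ * (ufshcd_adjust_lun_qdepth() above derives that number by counting the
+ * commands still outstanding on the LUN and sets the depth to that count
+ * minus the rejected command.)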
+ */ + ufshcd_adjust_lun_qdepth(lrbp->cmd); + result |= SAM_STAT_TASK_SET_FULL; + break; + case SAM_STAT_TASK_ABORTED: + result |= SAM_STAT_TASK_ABORTED; + break; + default: + result |= DID_ERROR << 16; + break; + } /* end of switch */ + + return result; +} + +/** + * ufshcd_transfer_rsp_status - Get overall status of the response + * @hba: per adapter instance + * @lrb: pointer to local reference block of completed command + * + * Returns result of the command to notify SCSI midlayer + */ +static inline int +ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + int result = 0; + int scsi_status; + int ocs; + + /* overall command status of utrd */ + ocs = ufshcd_get_tr_ocs(lrbp); + + switch (ocs) { + case OCS_SUCCESS: + + /* check if the returned transfer response is valid */ + result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); + if (result) { + dev_err(&hba->pdev->dev, + "Invalid response = %x\n", result); + break; + } + + /* + * get the response UPIU result to extract + * the SCSI command status + */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + /* + * get the result based on SCSI status response + * to notify the SCSI midlayer of the command status + */ + scsi_status = result & MASK_SCSI_STATUS; + result = ufshcd_scsi_cmd_status(lrbp, scsi_status); + break; + case OCS_ABORTED: + result |= DID_ABORT << 16; + break; + case OCS_INVALID_CMD_TABLE_ATTR: + case OCS_INVALID_PRDT_ATTR: + case OCS_MISMATCH_DATA_BUF_SIZE: + case OCS_MISMATCH_RESP_UPIU_SIZE: + case OCS_PEER_COMM_FAILURE: + case OCS_FATAL_ERROR: + default: + result |= DID_ERROR << 16; + dev_err(&hba->pdev->dev, + "OCS error from controller = %x\n", ocs); + break; + } /* end of switch */ + + return result; +} + +/** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + */ +static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + struct ufshcd_lrb *lrb; + unsigned long completed_reqs; + u32 tr_doorbell; + int result; + int index; + + lrb = hba->lrb; + tr_doorbell = + readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + + for (index = 0; index < hba->nutrs; index++) { + if (test_bit(index, &completed_reqs)) { + + result = ufshcd_transfer_rsp_status(hba, &lrb[index]); + + if (lrb[index].cmd) { + scsi_dma_unmap(lrb[index].cmd); + lrb[index].cmd->result = result; + lrb[index].cmd->scsi_done(lrb[index].cmd); + + /* Mark completed command as NULL in LRB */ + lrb[index].cmd = NULL; + } + } /* end of if */ + } /* end of for */ + + /* clear corresponding bits of completed commands */ + hba->outstanding_reqs ^= completed_reqs; + + /* Reset interrupt aggregation counters */ + ufshcd_config_int_aggr(hba, INT_AGGR_RESET); +} + +/** + * ufshcd_uic_cc_handler - handle UIC command completion + * @work: pointer to a work queue structure + * + * Returns 0 on success, non-zero value on failure + */ +static void ufshcd_uic_cc_handler (struct work_struct *work) +{ + struct ufs_hba *hba; + + hba = container_of(work, struct ufs_hba, uic_workq); + + if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) && + !(ufshcd_get_uic_cmd_result(hba))) { + + if (ufshcd_make_hba_operational(hba)) + dev_err(&hba->pdev->dev, + "cc: hba not operational state\n"); + return; + } +} + +/** + * ufshcd_fatal_err_handler - handle fatal errors + * @hba: per adapter instance + */ +static void ufshcd_fatal_err_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + hba = container_of(work, struct ufs_hba, 
feh_workq); + + /* check if reset is already in progress */ + if (hba->ufshcd_state != UFSHCD_STATE_RESET) + ufshcd_do_reset(hba); +} + +/** + * ufshcd_err_handler - Check for fatal errors + * @work: pointer to a work queue structure + */ +static void ufshcd_err_handler(struct ufs_hba *hba) +{ + u32 reg; + + if (hba->errors & INT_FATAL_ERRORS) + goto fatal_eh; + + if (hba->errors & UIC_ERROR) { + + reg = readl(hba->mmio_base + + REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + goto fatal_eh; + } + return; +fatal_eh: + hba->ufshcd_state = UFSHCD_STATE_ERROR; + schedule_work(&hba->feh_workq); +} + +/** + * ufshcd_tmc_handler - handle task management function completion + * @hba: per adapter instance + */ +static void ufshcd_tmc_handler(struct ufs_hba *hba) +{ + u32 tm_doorbell; + + tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL); + hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; + wake_up_interruptible(&hba->ufshcd_tm_wait_queue); +} + +/** + * ufshcd_sl_intr - Interrupt service routine + * @hba: per adapter instance + * @intr_status: contains interrupts generated by the controller + */ +static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +{ + hba->errors = UFSHCD_ERROR_MASK & intr_status; + if (hba->errors) + ufshcd_err_handler(hba); + + if (intr_status & UIC_COMMAND_COMPL) + schedule_work(&hba->uic_workq); + + if (intr_status & UTP_TASK_REQ_COMPL) + ufshcd_tmc_handler(hba); + + if (intr_status & UTP_TRANSFER_REQ_COMPL) + ufshcd_transfer_req_compl(hba); +} + +/** + * ufshcd_intr - Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Returns IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + u32 intr_status; + irqreturn_t retval = IRQ_NONE; + struct ufs_hba *hba = __hba; + + spin_lock(hba->host->host_lock); + intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS); + + if (intr_status) { + ufshcd_sl_intr(hba, intr_status); + + /* If UFSHCI 1.0 then clear interrupt status register */ + if (hba->ufs_version == UFSHCI_VERSION_10) + writel(intr_status, + (hba->mmio_base + REG_INTERRUPT_STATUS)); + retval = IRQ_HANDLED; + } + spin_unlock(hba->host->host_lock); + return retval; +} + +/** + * ufshcd_issue_tm_cmd - issues task management commands to controller + * @hba: per adapter instance + * @lrbp: pointer to local reference block + * + * Returns SUCCESS/FAILED + */ +static int +ufshcd_issue_tm_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + u8 tm_function) +{ + struct utp_task_req_desc *task_req_descp; + struct utp_upiu_task_req *task_req_upiup; + struct Scsi_Host *host; + unsigned long flags; + int free_slot = 0; + int err; + + host = hba->host; + + spin_lock_irqsave(host->host_lock, flags); + + /* If task management queue is full */ + free_slot = ufshcd_get_tm_free_slot(hba); + if (free_slot >= hba->nutmrs) { + spin_unlock_irqrestore(host->host_lock, flags); + dev_err(&hba->pdev->dev, "Task management queue full\n"); + err = FAILED; + goto out; + } + + task_req_descp = hba->utmrdl_base_addr; + task_req_descp += free_slot; + + /* Configure task request descriptor */ + task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); + task_req_descp->header.dword_2 = + cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + + /* Configure task request UPIU */ + task_req_upiup = + (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; + task_req_upiup->header.dword_0 = + 
cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, + lrbp->lun, lrbp->task_tag)); + task_req_upiup->header.dword_1 = + cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0)); + + task_req_upiup->input_param1 = lrbp->lun; + task_req_upiup->input_param1 = + cpu_to_be32(task_req_upiup->input_param1); + task_req_upiup->input_param2 = lrbp->task_tag; + task_req_upiup->input_param2 = + cpu_to_be32(task_req_upiup->input_param2); + + /* send command to the controller */ + __set_bit(free_slot, &hba->outstanding_tasks); + writel((1 << free_slot), + (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL)); + + spin_unlock_irqrestore(host->host_lock, flags); + + /* wait until the task management command is completed */ + err = + wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, + (test_bit(free_slot, + &hba->tm_condition) != 0), + 60 * HZ); + if (!err) { + dev_err(&hba->pdev->dev, + "Task management command timed-out\n"); + err = FAILED; + goto out; + } + clear_bit(free_slot, &hba->tm_condition); + return ufshcd_task_req_compl(hba, free_slot); +out: + return err; +} + +/** + * ufshcd_device_reset - reset device and abort all the pending commands + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_device_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + unsigned int tag; + u32 pos; + int err; + + host = cmd->device->host; + hba = shost_priv(host); + tag = cmd->request->tag; + + err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); + if (err) + goto out; + + for (pos = 0; pos < hba->nutrs; pos++) { + if (test_bit(pos, &hba->outstanding_reqs) && + (hba->lrb[tag].lun == hba->lrb[pos].lun)) { + + /* clear the respective UTRLCLR register bit */ + ufshcd_utrl_clear(hba, pos); + + clear_bit(pos, &hba->outstanding_reqs); + + if (hba->lrb[pos].cmd) { + scsi_dma_unmap(hba->lrb[pos].cmd); + hba->lrb[pos].cmd->result = + DID_ABORT << 16; + hba->lrb[pos].cmd->scsi_done(cmd); + hba->lrb[pos].cmd = NULL; + } + } + } /* end of for */ +out: + return err; +} + +/** + * ufshcd_host_reset - Main reset function registered with scsi layer + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_host_reset(struct scsi_cmnd *cmd) +{ + struct ufs_hba *hba; + + hba = shost_priv(cmd->device->host); + + if (hba->ufshcd_state == UFSHCD_STATE_RESET) + return SUCCESS; + + return (ufshcd_do_reset(hba) == SUCCESS) ? 
SUCCESS : FAILED; +} + +/** + * ufshcd_abort - abort a specific command + * @cmd: SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + unsigned long flags; + unsigned int tag; + int err; + + host = cmd->device->host; + hba = shost_priv(host); + tag = cmd->request->tag; + + spin_lock_irqsave(host->host_lock, flags); + + /* check if command is still pending */ + if (!(test_bit(tag, &hba->outstanding_reqs))) { + err = FAILED; + spin_unlock_irqrestore(host->host_lock, flags); + goto out; + } + spin_unlock_irqrestore(host->host_lock, flags); + + err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); + if (err) + goto out; + + scsi_dma_unmap(cmd); + + spin_lock_irqsave(host->host_lock, flags); + + /* clear the respective UTRLCLR register bit */ + ufshcd_utrl_clear(hba, tag); + + __clear_bit(tag, &hba->outstanding_reqs); + hba->lrb[tag].cmd = NULL; + spin_unlock_irqrestore(host->host_lock, flags); +out: + return err; +} + +static struct scsi_host_template ufshcd_driver_template = { + .module = THIS_MODULE, + .name = UFSHCD, + .proc_name = UFSHCD, + .queuecommand = ufshcd_queuecommand, + .slave_alloc = ufshcd_slave_alloc, + .slave_destroy = ufshcd_slave_destroy, + .eh_abort_handler = ufshcd_abort, + .eh_device_reset_handler = ufshcd_device_reset, + .eh_host_reset_handler = ufshcd_host_reset, + .this_id = -1, + .sg_tablesize = SG_ALL, + .cmd_per_lun = UFSHCD_CMD_PER_LUN, + .can_queue = UFSHCD_CAN_QUEUE, +}; + +/** + * ufshcd_shutdown - main function to put the controller in reset state + * @pdev: pointer to PCI device handle + */ +static void ufshcd_shutdown(struct pci_dev *pdev) +{ + ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); +} + +#ifdef CONFIG_PM +/** + * ufshcd_suspend - suspend power management function + * @pdev: pointer to PCI device handle + * @state: power state + * + * Returns -ENOSYS + */ +static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state) +{ + /* + * TODO: + * 1. Block SCSI requests from SCSI midlayer + * 2. Change the internal driver state to non operational + * 3. Set UTRLRSR and UTMRLRSR bits to zero + * 4. Wait until outstanding commands are completed + * 5. Set HCE to zero to send the UFS host controller to reset state + */ + + return -ENOSYS; +} + +/** + * ufshcd_resume - resume power management function + * @pdev: pointer to PCI device handle + * + * Returns -ENOSYS + */ +static int ufshcd_resume(struct pci_dev *pdev) +{ + /* + * TODO: + * 1. Set HCE to 1, to start the UFS host controller + * initialization process + * 2. Set UTRLRSR and UTMRLRSR bits to 1 + * 3. Change the internal driver state to operational + * 4. 
Unblock SCSI requests from SCSI midlayer + */ + + return -ENOSYS; +} +#endif /* CONFIG_PM */ + +/** + * ufshcd_hba_free - free allocated memory for + * host memory space data structures + * @hba: per adapter instance + */ +static void ufshcd_hba_free(struct ufs_hba *hba) +{ + iounmap(hba->mmio_base); + ufshcd_free_hba_memory(hba); + pci_release_regions(hba->pdev); +} + +/** + * ufshcd_remove - de-allocate PCI/SCSI host and host memory space + * data structure memory + * @pdev - pointer to PCI handle + */ +static void ufshcd_remove(struct pci_dev *pdev) +{ + struct ufs_hba *hba = pci_get_drvdata(pdev); + + /* disable interrupts */ + ufshcd_int_config(hba, UFSHCD_INT_DISABLE); + free_irq(pdev->irq, hba); + + ufshcd_hba_stop(hba); + ufshcd_hba_free(hba); + + scsi_remove_host(hba->host); + scsi_host_put(hba->host); + pci_set_drvdata(pdev, NULL); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +/** + * ufshcd_set_dma_mask - Set dma mask based on the controller + * addressing capability + * @pdev: PCI device structure + * + * Returns 0 for success, non-zero for failure + */ +static int ufshcd_set_dma_mask(struct ufs_hba *hba) +{ + int err; + u64 dma_mask; + + /* + * If controller supports 64 bit addressing mode, then set the DMA + * mask to 64-bit, else set the DMA mask to 32-bit + */ + if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) + dma_mask = DMA_BIT_MASK(64); + else + dma_mask = DMA_BIT_MASK(32); + + err = pci_set_dma_mask(hba->pdev, dma_mask); + if (err) + return err; + + err = pci_set_consistent_dma_mask(hba->pdev, dma_mask); + + return err; +} + +/** + * ufshcd_probe - probe routine of the driver + * @pdev: pointer to PCI device handle + * @id: PCI device id + * + * Returns 0 on success, non-zero value on failure + */ +static int __devinit +ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *host; + struct ufs_hba *hba; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto out_error; + } + + pci_set_master(pdev); + + host = scsi_host_alloc(&ufshcd_driver_template, + sizeof(struct ufs_hba)); + if (!host) { + dev_err(&pdev->dev, "scsi_host_alloc failed\n"); + err = -ENOMEM; + goto out_disable; + } + hba = shost_priv(host); + + err = pci_request_regions(pdev, UFSHCD); + if (err < 0) { + dev_err(&pdev->dev, "request regions failed\n"); + goto out_disable; + } + + hba->mmio_base = pci_ioremap_bar(pdev, 0); + if (!hba->mmio_base) { + dev_err(&pdev->dev, "memory map failed\n"); + err = -ENOMEM; + goto out_release_regions; + } + + hba->host = host; + hba->pdev = pdev; + + /* Read capabilities registers */ + ufshcd_hba_capabilities(hba); + + /* Get UFS version supported by the controller */ + hba->ufs_version = ufshcd_get_ufs_version(hba); + + err = ufshcd_set_dma_mask(hba); + if (err) { + dev_err(&pdev->dev, "set dma mask failed\n"); + goto out_iounmap; + } + + /* Allocate memory for host memory space */ + err = ufshcd_memory_alloc(hba); + if (err) { + dev_err(&pdev->dev, "Memory allocation failed\n"); + goto out_iounmap; + } + + /* Configure LRB */ + ufshcd_host_memory_configure(hba); + + host->can_queue = hba->nutrs; + host->cmd_per_lun = hba->nutrs; + host->max_id = UFSHCD_MAX_ID; + host->max_lun = UFSHCD_MAX_LUNS; + host->max_channel = UFSHCD_MAX_CHANNEL; + host->unique_id = host->host_no; + host->max_cmd_len = MAX_CDB_SIZE; + + /* Initailize wait queue for task management */ + init_waitqueue_head(&hba->ufshcd_tm_wait_queue); + + /* Initialize work queues */ + 
INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler); + INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); + + /* IRQ registration */ + err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + if (err) { + dev_err(&pdev->dev, "request irq failed\n"); + goto out_lrb_free; + } + + /* Enable SCSI tag mapping */ + err = scsi_init_shared_tag_map(host, host->can_queue); + if (err) { + dev_err(&pdev->dev, "init shared queue failed\n"); + goto out_free_irq; + } + + pci_set_drvdata(pdev, hba); + + err = scsi_add_host(host, &pdev->dev); + if (err) { + dev_err(&pdev->dev, "scsi_add_host failed\n"); + goto out_free_irq; + } + + /* Initialization routine */ + err = ufshcd_initialize_hba(hba); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); + goto out_free_irq; + } + + return 0; + +out_free_irq: + free_irq(pdev->irq, hba); +out_lrb_free: + ufshcd_free_hba_memory(hba); +out_iounmap: + iounmap(hba->mmio_base); +out_release_regions: + pci_release_regions(pdev); +out_disable: + scsi_host_put(host); + pci_clear_master(pdev); + pci_disable_device(pdev); +out_error: + return err; +} + +static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { + { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl); + +static struct pci_driver ufshcd_pci_driver = { + .name = UFSHCD, + .id_table = ufshcd_pci_tbl, + .probe = ufshcd_probe, + .remove = __devexit_p(ufshcd_remove), + .shutdown = ufshcd_shutdown, +#ifdef CONFIG_PM + .suspend = ufshcd_suspend, + .resume = ufshcd_resume, +#endif +}; + +/** + * ufshcd_init - Driver registration routine + */ +static int __init ufshcd_init(void) +{ + return pci_register_driver(&ufshcd_pci_driver); +} +module_init(ufshcd_init); + +/** + * ufshcd_exit - Driver exit clean-up routine + */ +static void __exit ufshcd_exit(void) +{ + pci_unregister_driver(&ufshcd_pci_driver); +} +module_exit(ufshcd_exit); + + +MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, " + "Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("Generic UFS host controller driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h new file mode 100644 index 000000000000..6e3510f71167 --- /dev/null +++ b/drivers/scsi/ufs/ufshci.h @@ -0,0 +1,376 @@ +/* + * Universal Flash Storage Host controller driver + * + * This code is based on drivers/scsi/ufs/ufshci.h + * Copyright (C) 2011-2012 Samsung India Software Operations + * + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef _UFSHCI_H +#define _UFSHCI_H + +enum { + TASK_REQ_UPIU_SIZE_DWORDS = 8, + TASK_RSP_UPIU_SIZE_DWORDS = 8, + ALIGNED_UPIU_SIZE = 128, +}; + +/* UFSHCI Registers */ +enum { + REG_CONTROLLER_CAPABILITIES = 0x00, + REG_UFS_VERSION = 0x08, + REG_CONTROLLER_DEV_ID = 0x10, + REG_CONTROLLER_PROD_ID = 0x14, + REG_INTERRUPT_STATUS = 0x20, + REG_INTERRUPT_ENABLE = 0x24, + REG_CONTROLLER_STATUS = 0x30, + REG_CONTROLLER_ENABLE = 0x34, + REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38, + REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C, + REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40, + REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44, + REG_UIC_ERROR_CODE_DME = 0x48, + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C, + REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50, + REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54, + REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, + REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, + REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, + REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, + REG_UTP_TASK_REQ_DOOR_BELL = 0x78, + REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C, + REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80, + REG_UIC_COMMAND = 0x90, + REG_UIC_COMMAND_ARG_1 = 0x94, + REG_UIC_COMMAND_ARG_2 = 0x98, + REG_UIC_COMMAND_ARG_3 = 0x9C, +}; + +/* Controller capability masks */ +enum { + MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, + MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, + MASK_64_ADDRESSING_SUPPORT = 0x01000000, + MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, + MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, +}; + +/* UFS Version 08h */ +#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) +#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) + +/* Controller UFSHCI version */ +enum { + UFSHCI_VERSION_10 = 0x00010000, + UFSHCI_VERSION_11 = 0x00010100, +}; + +/* + * HCDDID - Host Controller Identification Descriptor + * - Device ID and Device Class 10h + */ +#define DEVICE_CLASS UFS_MASK(0xFFFF, 0) +#define DEVICE_ID UFS_MASK(0xFF, 24) + +/* + * HCPMID - Host Controller Identification Descriptor + * - Product/Manufacturer ID 14h + */ +#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0) +#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16) + +#define UFS_BIT(x) (1L << (x)) + +#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0) +#define UIC_DME_END_PT_RESET UFS_BIT(1) +#define UIC_ERROR UFS_BIT(2) +#define UIC_TEST_MODE UFS_BIT(3) +#define UIC_POWER_MODE UFS_BIT(4) +#define UIC_HIBERNATE_EXIT 
UFS_BIT(5) +#define UIC_HIBERNATE_ENTER UFS_BIT(6) +#define UIC_LINK_LOST UFS_BIT(7) +#define UIC_LINK_STARTUP UFS_BIT(8) +#define UTP_TASK_REQ_COMPL UFS_BIT(9) +#define UIC_COMMAND_COMPL UFS_BIT(10) +#define DEVICE_FATAL_ERROR UFS_BIT(11) +#define CONTROLLER_FATAL_ERROR UFS_BIT(16) +#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) + +#define UFSHCD_ERROR_MASK (UIC_ERROR |\ + DEVICE_FATAL_ERROR |\ + CONTROLLER_FATAL_ERROR |\ + SYSTEM_BUS_FATAL_ERROR) + +#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ + CONTROLLER_FATAL_ERROR |\ + SYSTEM_BUS_FATAL_ERROR) + +/* HCS - Host Controller Status 30h */ +#define DEVICE_PRESENT UFS_BIT(0) +#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1) +#define UTP_TASK_REQ_LIST_READY UFS_BIT(2) +#define UIC_COMMAND_READY UFS_BIT(3) +#define HOST_ERROR_INDICATOR UFS_BIT(4) +#define DEVICE_ERROR_INDICATOR UFS_BIT(5) +#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) + +/* HCE - Host Controller Enable 34h */ +#define CONTROLLER_ENABLE UFS_BIT(0) +#define CONTROLLER_DISABLE 0x0 + +/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ +#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) +#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F + +/* UECDL - Host UIC Error Code Data Link Layer 3Ch */ +#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF +#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 + +/* UECN - Host UIC Error Code Network Layer 40h */ +#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) +#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 + +/* UECT - Host UIC Error Code Transport Layer 44h */ +#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31) +#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F + +/* UECDME - Host UIC Error Code DME 48h */ +#define UIC_DME_ERROR UFS_BIT(31) +#define UIC_DME_ERROR_CODE_MASK 0x1 + +#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF +#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) +#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16) +#define INT_AGGR_STATUS_BIT UFS_BIT(20) +#define INT_AGGR_PARAM_WRITE UFS_BIT(24) +#define INT_AGGR_ENABLE UFS_BIT(31) + +/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ +#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) + +/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ +#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) + +/* UICCMD - UIC Command */ +#define COMMAND_OPCODE_MASK 0xFF +#define GEN_SELECTOR_INDEX_MASK 0xFFFF + +#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) +#define RESET_LEVEL 0xFF + +#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16) +#define CONFIG_RESULT_CODE_MASK 0xFF +#define GENERIC_ERROR_CODE_MASK 0xFF + +/* UIC Commands */ +enum { + UIC_CMD_DME_GET = 0x01, + UIC_CMD_DME_SET = 0x02, + UIC_CMD_DME_PEER_GET = 0x03, + UIC_CMD_DME_PEER_SET = 0x04, + UIC_CMD_DME_POWERON = 0x10, + UIC_CMD_DME_POWEROFF = 0x11, + UIC_CMD_DME_ENABLE = 0x12, + UIC_CMD_DME_RESET = 0x14, + UIC_CMD_DME_END_PT_RST = 0x15, + UIC_CMD_DME_LINK_STARTUP = 0x16, + UIC_CMD_DME_HIBER_ENTER = 0x17, + UIC_CMD_DME_HIBER_EXIT = 0x18, + UIC_CMD_DME_TEST_MODE = 0x1A, +}; + +/* UIC Config result code / Generic error code */ +enum { + UIC_CMD_RESULT_SUCCESS = 0x00, + UIC_CMD_RESULT_INVALID_ATTR = 0x01, + UIC_CMD_RESULT_FAILURE = 0x01, + UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02, + UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03, + UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04, + UIC_CMD_RESULT_BAD_INDEX = 0x05, + UIC_CMD_RESULT_LOCKED_ATTR = 0x06, + UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07, + UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08, + UIC_CMD_RESULT_BUSY = 0x09, + 
UIC_CMD_RESULT_DME_FAILURE = 0x0A, +}; + +#define MASK_UIC_COMMAND_RESULT 0xFF + +#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) +#define INT_AGGR_TIMEOUT_VALUE (0x02) + +/* Interrupt disable masks */ +enum { + /* Interrupt disable mask for UFSHCI v1.0 */ + INTERRUPT_DISABLE_MASK_10 = 0xFFFF, + + /* Interrupt disable mask for UFSHCI v1.1 */ + INTERRUPT_DISABLE_MASK_11 = 0x0, +}; + +/* + * Request Descriptor Definitions + */ + +/* Transfer request command type */ +enum { + UTP_CMD_TYPE_SCSI = 0x0, + UTP_CMD_TYPE_UFS = 0x1, + UTP_CMD_TYPE_DEV_MANAGE = 0x2, +}; + +enum { + UTP_SCSI_COMMAND = 0x00000000, + UTP_NATIVE_UFS_COMMAND = 0x10000000, + UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, + UTP_REQ_DESC_INT_CMD = 0x01000000, +}; + +/* UTP Transfer Request Data Direction (DD) */ +enum { + UTP_NO_DATA_TRANSFER = 0x00000000, + UTP_HOST_TO_DEVICE = 0x02000000, + UTP_DEVICE_TO_HOST = 0x04000000, +}; + +/* Overall command status values */ +enum { + OCS_SUCCESS = 0x0, + OCS_INVALID_CMD_TABLE_ATTR = 0x1, + OCS_INVALID_PRDT_ATTR = 0x2, + OCS_MISMATCH_DATA_BUF_SIZE = 0x3, + OCS_MISMATCH_RESP_UPIU_SIZE = 0x4, + OCS_PEER_COMM_FAILURE = 0x5, + OCS_ABORTED = 0x6, + OCS_FATAL_ERROR = 0x7, + OCS_INVALID_COMMAND_STATUS = 0x0F, + MASK_OCS = 0x0F, +}; + +/** + * struct ufshcd_sg_entry - UFSHCI PRD Entry + * @base_addr: Lower 32bit physical address DW-0 + * @upper_addr: Upper 32bit physical address DW-1 + * @reserved: Reserved for future use DW-2 + * @size: size of physical segment DW-3 + */ +struct ufshcd_sg_entry { + u32 base_addr; + u32 upper_addr; + u32 reserved; + u32 size; +}; + +/** + * struct utp_transfer_cmd_desc - UFS Command Descriptor structure + * @command_upiu: Command UPIU Frame address + * @response_upiu: Response UPIU Frame address + * @prd_table: Physical Region Descriptor + */ +struct utp_transfer_cmd_desc { + u8 command_upiu[ALIGNED_UPIU_SIZE]; + u8 response_upiu[ALIGNED_UPIU_SIZE]; + struct ufshcd_sg_entry prd_table[SG_ALL]; +}; + +/** + * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD + * @dword0: Descriptor Header DW0 + * @dword1: Descriptor Header DW1 + * @dword2: Descriptor Header DW2 + * @dword3: Descriptor Header DW3 + */ +struct request_desc_header { + u32 dword_0; + u32 dword_1; + u32 dword_2; + u32 dword_3; +}; + +/** + * struct utp_transfer_req_desc - UTRD structure + * @header: UTRD header DW-0 to DW-3 + * @command_desc_base_addr_lo: UCD base address low DW-4 + * @command_desc_base_addr_hi: UCD base address high DW-5 + * @response_upiu_length: response UPIU length DW-6 + * @response_upiu_offset: response UPIU offset DW-6 + * @prd_table_length: Physical region descriptor length DW-7 + * @prd_table_offset: Physical region descriptor offset DW-7 + */ +struct utp_transfer_req_desc { + + /* DW 0-3 */ + struct request_desc_header header; + + /* DW 4-5*/ + u32 command_desc_base_addr_lo; + u32 command_desc_base_addr_hi; + + /* DW 6 */ + u16 response_upiu_length; + u16 response_upiu_offset; + + /* DW 7 */ + u16 prd_table_length; + u16 prd_table_offset; +}; + +/** + * struct utp_task_req_desc - UTMRD structure + * @header: UTMRD header DW-0 to DW-3 + * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11 + * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19 + */ +struct utp_task_req_desc { + + /* DW 0-3 */ + struct request_desc_header header; + + /* DW 4-11 */ + u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; + + /* DW 12-19 */ + u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; +}; + +#endif /* End of Header */ diff --git 
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 7e22b737dfd8..14e0c40a68c9 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -141,7 +141,6 @@
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/dma.h>
#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7264116185d5..4411d4224401 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
return 0;
}
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter. In case of
+ * failure due to any reason return default i.e. 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+ struct PVSCSICmdDescConfigCmd cmd;
+ struct PVSCSIConfigPageHeader *header;
+ struct device *dev;
+ dma_addr_t configPagePA;
+ void *config_page;
+ u32 numPhys = 16;
+
+ dev = pvscsi_dev(adapter);
+ config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &configPagePA);
+ if (!config_page) {
+ dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+ goto exit;
+ }
+ BUG_ON(configPagePA & ~PAGE_MASK);
+
+ /* Fetch config info from the device. */
+ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+ cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+ cmd.cmpAddr = configPagePA;
+ cmd._pad = 0;
+
+ /*
+ * Mark the completion page header with error values. If the device
+ * completes the command successfully, it sets the status values to
+ * indicate success.
+ */
+ header = config_page;
+ memset(header, 0, sizeof *header);
+ header->hostStatus = BTSTAT_INVPARAM;
+ header->scsiStatus = SDSTAT_CHECK;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+ if (header->hostStatus == BTSTAT_SUCCESS &&
+ header->scsiStatus == SDSTAT_GOOD) {
+ struct PVSCSIConfigPageController *config;
+
+ config = config_page;
+ numPhys = config->numPhys;
+ } else
+ dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+ header->hostStatus, header->scsiStatus);
+ pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+ return numPhys;
+}
+
static int __devinit pvscsi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
+ struct device *dev;
unsigned int i;
unsigned long flags = 0;
int error;
@@ -1272,6 +1328,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev,
}
/*
+ * Ask the device for max number of targets.
+ */
+ host->max_id = pvscsi_get_max_targets(adapter);
+ dev = pvscsi_dev(adapter);
+ dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
+
+ /*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
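The pvscsi_get_max_targets() addition above relies on a small handshake: the driver pre-seeds the completion page header with error values (BTSTAT_INVPARAM, SDSTAT_CHECK), issues PVSCSI_CMD_CONFIG, and only trusts the page if the device overwrote those values with BTSTAT_SUCCESS and SDSTAT_GOOD, falling back to the default of 16 targets otherwise. The stand-alone C sketch below mimics that pattern in user space; fake_device_complete() and the value 64 are hypothetical stand-ins for the real device and PVSCSIConfigPageController::numPhys.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Status codes as defined in vmw_pvscsi.h. */
#define BTSTAT_SUCCESS  0x00
#define BTSTAT_INVPARAM 0x1a
#define SDSTAT_GOOD     0x00
#define SDSTAT_CHECK    0x02

struct config_page_header {
	uint32_t pageNum;
	uint16_t numDwords;
	uint16_t hostStatus;
	uint16_t scsiStatus;
	uint16_t reserved[3];
};

/* Hypothetical stand-in for the device completing PVSCSI_CMD_CONFIG. */
static void fake_device_complete(struct config_page_header *hdr, int succeed)
{
	if (succeed) {
		hdr->hostStatus = BTSTAT_SUCCESS;
		hdr->scsiStatus = SDSTAT_GOOD;
	}
	/* On failure the device leaves the pre-seeded error values alone. */
}

static uint32_t query_max_targets(int device_ok)
{
	struct config_page_header hdr;
	uint32_t num_phys = 16;		/* default when the query fails */

	/* Pre-seed with error values, as pvscsi_get_max_targets() does. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.hostStatus = BTSTAT_INVPARAM;
	hdr.scsiStatus = SDSTAT_CHECK;

	fake_device_complete(&hdr, device_ok);

	if (hdr.hostStatus == BTSTAT_SUCCESS && hdr.scsiStatus == SDSTAT_GOOD)
		num_phys = 64;	/* would come from the controller config page */

	return num_phys;
}

int main(void)
{
	printf("device ok:   max_id = %u\n", query_max_targets(1));
	printf("device down: max_id = %u\n", query_max_targets(0));
	return 0;
}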
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 62e36e75715e..3546e8662e30 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
@@ -39,28 +39,45 @@
* host adapter status/error codes
*/
enum HostBusAdapterStatus {
- BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
- BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
- BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
- BTSTAT_DATA_UNDERRUN = 0x0c,
- BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
- BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
- BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
- BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
- BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
- BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
- BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
- BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
- BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
- BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
- BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
- BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
- BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
- BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
- BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
- BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
- BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
- BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+ BTSTAT_DATA_UNDERRUN = 0x0c,
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence
+ * requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from
+ * first CCB */
+ BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
+ * list */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message
+ * rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the
+ * host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN,
+ * sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI
+ * RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly
+ * (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+ SDSTAT_GOOD = 0x00, /* No errors. */
+ SDSTAT_CHECK = 0x02, /* Check condition. */
};
/*
@@ -114,6 +131,29 @@ struct PVSCSICmdDescResetDevice {
} __packed;
/*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+ u64 cmpAddr;
+ u64 configPageAddress;
+ u32 configPageNum;
+ u32 _pad;
+} __packed;
+
+enum PVSCSIConfigPageType {
+ PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+ PVSCSI_CONFIG_PAGE_PHY = 0x1959,
+ PVSCSI_CONFIG_PAGE_DEVICE = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+ PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+ PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121,
+ PVSCSI_CONFIG_PHY_ADDRESS = 0x2122,
+};
+
+/*
* Command descriptor for PVSCSI_CMD_ABORT_CMD --
*
* - currently does not support specifying the LUN.
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc {
u32 _pad[2];
} __packed;
+struct PVSCSIConfigPageHeader {
+ u32 pageNum;
+ u16 numDwords;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+ struct PVSCSIConfigPageHeader header;
+ u64 nodeWWN; /* Device name as defined in the SAS spec. */
+ u16 manufacturer[64];
+ u16 serialNumber[64];
+ u16 opromVersion[32];
+ u16 hwVersion[32];
+ u16 firmwareVersion[32];
+ u32 numPhys;
+ u8 useConsecutivePhyWWNs;
+ u8 reserved[3];
+} __packed;
+
/*
* Interrupt status / IRQ bits.
*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 9ee0afef2d16..d89a5dfd3ade 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -179,7 +179,6 @@
#include <linux/stat.h>
#include <linux/io.h>
-#include <asm/system.h>
#include <asm/dma.h>
#include <scsi/scsi.h>
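A side note on the PVSCSIConfigPage structures added to vmw_pvscsi.h above: the device fills the page by DMA, so the in-memory layout has to match the wire format byte for byte, which is presumably why every structure is declared __packed. A quick stand-alone way to sanity-check the declared layout (an illustration only, not part of the patch; the mirrored struct names are invented here) is to restate the fields with fixed-width types and assert the expected offsets and total size:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors of the structures added in vmw_pvscsi.h, for layout checking only. */
struct cfg_page_header {
	uint32_t pageNum;
	uint16_t numDwords;
	uint16_t hostStatus;
	uint16_t scsiStatus;
	uint16_t reserved[3];
} __attribute__((packed));

struct cfg_page_controller {
	struct cfg_page_header header;
	uint64_t nodeWWN;		/* device name as defined in the SAS spec */
	uint16_t manufacturer[64];
	uint16_t serialNumber[64];
	uint16_t opromVersion[32];
	uint16_t hwVersion[32];
	uint16_t firmwareVersion[32];
	uint32_t numPhys;
	uint8_t  useConsecutivePhyWWNs;
	uint8_t  reserved[3];
} __attribute__((packed));

int main(void)
{
	/* Sizes follow from the field list: 16-byte header, 480-byte controller page. */
	static_assert(sizeof(struct cfg_page_header) == 16, "header layout");
	static_assert(offsetof(struct cfg_page_controller, numPhys) == 472,
		      "numPhys offset");
	static_assert(sizeof(struct cfg_page_controller) == 480, "controller layout");

	printf("config page layout checks passed\n");
	return 0;
}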