Diffstat (limited to 'drivers')
53 files changed, 11897 insertions, 674 deletions
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591ffe395..444143e5f28c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6483,6 +6483,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 			printk(MYIOC_s_INFO_FMT "%s: host reset in"
 			    " progress mpt_config timed out.!!\n",
 			    __func__, ioc->name);
+			mutex_unlock(&ioc->mptbase_cmds.mutex);
 			return -EFAULT;
 		}
 		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
 static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
 			       int iscsi_cmd, int size)
 {
-	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
-				       &cmd->dma);
+	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
 	if (!cmd->va) {
 		SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
 		return -ENOMEM;
 	}
-	memset(cmd->va, 0, sizeof(size));
+	memset(cmd->va, 0, size);
 	cmd->size = size;
 	be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
 		vshost = vport->drv_port.im_port->shost;
 		fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
 		fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+		fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+		memset(fc_host_supported_fc4s(vshost), 0,
+			sizeof(fc_host_supported_fc4s(vshost)));
+
+		/* For FCP type 0x08 */
+		if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+			fc_host_supported_fc4s(vshost)[2] = 1;
+
+		/* For fibre channel services type 0x20 */
+		fc_host_supported_fc4s(vshost)[7] = 1;
+
+		fc_host_supported_speeds(vshost) =
+				bfad_im_supported_speeds(&bfad->bfa);
+		fc_host_maxframe_size(vshost) =
+				bfa_fcport_get_maxfrsize(&bfad->bfa);
+
 		fc_vport->dd_data = vport;
 		vport->drv_port.im_port->fc_vport = fc_vport;
 	} else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
 	return 0;
 }
 
-static u32
+u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
 	struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
 		struct bfad_im_port_s *im_port, struct device *dev);
 void bfad_im_scsi_host_free(struct bfad_s *bfad,
 				struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
 #define MAX_FCP_TARGET 1024
 #define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..0578fa0dc14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.10"
+#define BNX2FC_VERSION		"1.0.11"
 
 #define PFX			"bnx2fc: "
 
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
 	struct packet_type fip_packet_type;
 	struct workqueue_struct *timer_work_queue;
 	struct kref kref;
-	struct fcoe_ctlr ctlr;
 	u8 vlan_enabled;
 	int vlan_id;
 	bool enabled;
 };
 
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x)			\
+	((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x)					\
+	((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
 
 struct bnx2fc_lport {
 	struct list_head list;
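The rewritten conversion macros above assume a specific memory layout: the struct fcoe_ctlr and the driver-private struct bnx2fc_interface now live in one allocation, with the ctlr first and the private structure immediately behind it, so moving between the two becomes pointer arithmetic instead of container_of(). A minimal stand-alone sketch of that contract (names here are illustrative, not from the patch):

	#include <stdlib.h>

	struct ctlr { long mode; };   /* stands in for struct fcoe_ctlr */
	struct priv { int vlan_id; }; /* stands in for struct bnx2fc_interface */

	/* ctlr -> priv: step just past the ctlr object. */
	#define priv_of(c) ((struct priv *)((c) + 1))
	/* priv -> ctlr: step back one ctlr-sized object. */
	#define ctlr_of(p) ((struct ctlr *)(p) - 1)

	static struct ctlr *ctlr_alloc(void)
	{
		/* One buffer: [struct ctlr][struct priv], as the fcoe_sysfs
		 * allocation helper does for the real structures below. */
		return calloc(1, sizeof(struct ctlr) + sizeof(struct priv));
	}

	int main(void)
	{
		struct ctlr *c = ctlr_alloc();
		if (!c)
			return 1;
		priv_of(c)->vlan_id = 100;           /* reach the private area */
		int ok = (ctlr_of(priv_of(c)) == c); /* round-trip check */
		free(c);
		return ok ? 0 : 1;
	}

The layout only holds if the private structure starts exactly at (ctlr + 1), which is why both allocation and conversion are funneled through the fcoe_sysfs helpers in the rest of this series.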
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	struct fc_exch *exch = fc_seq_exch(seq);
 	struct fc_lport *lport = exch->lp;
 	u8 *mac;
-	struct fc_frame_header *fh;
 	u8 op;
 
 	if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	mac = fr_cb(fp)->granted_mac;
 	if (is_zero_ether_addr(mac)) {
-		fh = fc_frame_header_get(fp);
-		if (fh->fh_type != FC_TYPE_ELS) {
-			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
-			    "fh_type != FC_TYPE_ELS\n");
-			fc_frame_free(fp);
-			return;
-		}
 		op = fc_frame_payload_op(fp);
 		if (lport->vport) {
 			if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 				return;
 			}
 		}
-		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
-			fc_frame_free(fp);
-			return;
-		}
+		fcoe_ctlr_recv_flogi(fip, lport, fp);
 	}
-	fip->update_mac(lport, mac);
+	if (!is_zero_ether_addr(mac))
+		fip->update_mac(lport, mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
 }
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct bnx2fc_interface *interface = port->priv;
-	struct fcoe_ctlr *fip = &interface->ctlr;
+	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 
 	switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..f52f668fd247 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Jan 22, 2011"
+#define DRV_MODULE_RELDATE	"Apr 24, 2012"
 
 
 static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
 static struct scsi_host_template bnx2fc_shost_template;
 static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
 static struct fc_function_template bnx2fc_vport_xport_function;
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
 	__fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct net_device *netdev = bnx2fc_netdev(fip->lp);
+	struct fcoe_fc_els_lesb *fcoe_lesb;
+	struct fc_els_lesb fc_lesb;
+
+	__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+	fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+	ctlr_dev->lesb.lesb_link_fail =
+		ntohl(fcoe_lesb->lesb_link_fail);
+	ctlr_dev->lesb.lesb_vlink_fail =
+		ntohl(fcoe_lesb->lesb_vlink_fail);
+	ctlr_dev->lesb.lesb_miss_fka =
+		ntohl(fcoe_lesb->lesb_miss_fka);
+	ctlr_dev->lesb.lesb_symb_err =
+		ntohl(fcoe_lesb->lesb_symb_err);
+	ctlr_dev->lesb.lesb_err_block =
+		ntohl(fcoe_lesb->lesb_err_block);
+	ctlr_dev->lesb.lesb_fcs_error =
+		ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev =
+		fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+	fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
 static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 {
 	struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	struct sk_buff		*skb;
 	struct fc_frame_header	*fh;
 	struct bnx2fc_interface	*interface;
+	struct fcoe_ctlr	*ctlr;
 	struct bnx2fc_hba *hba;
 	struct fcoe_port	*port;
 	struct fcoe_hdr		*hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 
 	port = (struct fcoe_port *)lport_priv(lport);
 	interface = port->priv;
+	ctlr = bnx2fc_to_ctlr(interface);
 	hba = interface->hba;
 
 	fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	}
 
 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
-		if (!interface->ctlr.sel_fcf) {
+		if (!ctlr->sel_fcf) {
 			BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
 			kfree_skb(skb);
 			return -EINVAL;
 		}
-		if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+		if (fcoe_ctlr_els_send(ctlr, lport, skb))
 			return 0;
 	}
 
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
 	eh->h_proto = htons(ETH_P_FCOE);
-	if (interface->ctlr.map_dest)
+	if (ctlr->map_dest)
 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
 	else
 		/* insert GW address */
-		memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+		memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
 
-	if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-		memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+	if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
 	else
 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct fc_lport *lport;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	interface = container_of(ptype, struct bnx2fc_interface,
				 fcoe_packet_type);
-	lport = interface->ctlr.lp;
+	ctlr = bnx2fc_to_ctlr(interface);
+	lport = ctlr->lp;
 
 	if (unlikely(lport == NULL)) {
 		printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 {
 	struct bnx2fc_hba *hba;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 	u64 wwnn, wwpn;
 
 	port = lport_priv(lport);
 	interface = port->priv;
+	ctlr = bnx2fc_to_ctlr(interface);
 	hba = interface->hba;
 
 	/* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
 
 	if (!lport->vport) {
 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-			wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+			wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
						 1, 0);
 		BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
 		fc_set_wwnn(lport, wwnn);
 
 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-			wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+			wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
						 2, 0);
 
 		BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 	struct fc_lport *lport;
 	struct fc_lport *vport;
 	struct bnx2fc_interface *interface, *tmp;
+	struct fcoe_ctlr *ctlr;
 	int wait_for_upload = 0;
 	u32 link_possible = 1;
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 		if (interface->hba != hba)
 			continue;
 
-		lport = interface->ctlr.lp;
+		ctlr = bnx2fc_to_ctlr(interface);
+		lport = ctlr->lp;
 		BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
				interface->netdev->name, event);
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
			 * on a stale vlan
			 */
 			if (interface->enabled)
-				fcoe_ctlr_link_up(&interface->ctlr);
-		} else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+				fcoe_ctlr_link_up(ctlr);
+		} else if (fcoe_ctlr_link_down(ctlr)) {
 			mutex_lock(&lport->lp_mutex);
 			list_for_each_entry(vport, &lport->vports, list)
				fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
			   struct net_device *orig_dev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 
 	interface = container_of(ptype, struct bnx2fc_interface,
				 fip_packet_type);
-	fcoe_ctlr_recv(&interface->ctlr, skb);
+	ctlr = bnx2fc_to_ctlr(interface);
+	fcoe_ctlr_recv(ctlr, skb);
 	return 0;
 }
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 {
 	struct net_device *netdev = interface->netdev;
 	struct net_device *physdev = interface->hba->phys_dev;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct netdev_hw_addr *ha;
 	int sel_san_mac = 0;
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
 		    (is_valid_ether_addr(ha->addr))) {
-			memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+			memcpy(ctlr->ctl_src_addr, ha->addr,
			       ETH_ALEN);
 			sel_san_mac = 1;
 			BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
 
 static void bnx2fc_interface_release(struct kref *kref)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct net_device *netdev;
 
 	interface = container_of(kref, struct bnx2fc_interface, kref);
 	BNX2FC_MISC_DBG("Interface is being released\n");
 
+	ctlr = bnx2fc_to_ctlr(interface);
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
 	netdev = interface->netdev;
 
 	/* tear-down FIP controller */
 	if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
-		fcoe_ctlr_destroy(&interface->ctlr);
+		fcoe_ctlr_destroy(ctlr);
 
-	kfree(interface);
+	fcoe_ctlr_device_delete(ctlr_dev);
 
 	dev_put(netdev);
 	module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
				      struct net_device *netdev,
				      enum fip_state fip_mode)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
+	int size;
 	int rc = 0;
 
-	interface = kzalloc(sizeof(*interface), GFP_KERNEL);
-	if (!interface) {
+	size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
					 size);
+	if (!ctlr_dev) {
 		printk(KERN_ERR PFX "Unable to allocate interface structure\n");
 		return NULL;
 	}
+	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	interface = fcoe_ctlr_priv(ctlr);
 	dev_hold(netdev);
 	kref_init(&interface->kref);
 	interface->hba = hba;
 	interface->netdev = netdev;
 
 	/* Initialize FIP */
-	fcoe_ctlr_init(&interface->ctlr, fip_mode);
-	interface->ctlr.send = bnx2fc_fip_send;
-	interface->ctlr.update_mac = bnx2fc_update_src_mac;
-	interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+	fcoe_ctlr_init(ctlr, fip_mode);
+	ctlr->send = bnx2fc_fip_send;
+	ctlr->update_mac = bnx2fc_update_src_mac;
+	ctlr->get_src_addr = bnx2fc_get_src_mac;
 	set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
 
 	rc = bnx2fc_interface_setup(interface);
 	if (!rc)
 		return interface;
 
-	fcoe_ctlr_destroy(&interface->ctlr);
+	fcoe_ctlr_destroy(ctlr);
 	dev_put(netdev);
-	kfree(interface);
+	fcoe_ctlr_device_delete(ctlr_dev);
 	return NULL;
 }
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
				  struct device *parent, int npiv)
 {
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport		*lport, *n_port;
 	struct fcoe_port	*port;
 	struct Scsi_Host	*shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 
 	blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
 	if (!blport) {
-		BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+		BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
 		return NULL;
 	}
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
 
 static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
 {
-	struct fc_lport *lport = interface->ctlr.lp;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+	struct fc_lport *lport = ctlr->lp;
 	struct fcoe_port *port = lport_priv(lport);
 	struct bnx2fc_hba *hba = interface->hba;
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
 
 static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 {
-	struct fc_lport *lport = interface->ctlr.lp;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+	struct fc_lport *lport = ctlr->lp;
 	struct fcoe_port *port = lport_priv(lport);
 
 	bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface = NULL;
 	struct workqueue_struct *timer_work_queue;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = bnx2fc_to_ctlr(interface);
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
 		goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
 {
 	struct bnx2fc_hba *hba = handle;
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	struct fc_lport *lport;
 
 	mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
 
 	list_for_each_entry(interface, &if_list, list) {
 		if (interface->hba == hba) {
-			lport = interface->ctlr.lp;
+			ctlr = bnx2fc_to_ctlr(interface);
+			lport = ctlr->lp;
 			/* Kick off Fabric discovery*/
 			printk(KERN_ERR PFX "ulp_init: start discovery\n");
 			lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
 
 static void bnx2fc_stop(struct bnx2fc_interface *interface)
 {
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport *lport;
 	struct fc_lport *vport;
 
 	if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
 		return;
 
-	lport = interface->ctlr.lp;
+	lport = ctlr->lp;
 	bnx2fc_port_shutdown(lport);
 
 	mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
			FC_PORTTYPE_UNKNOWN;
 	mutex_unlock(&lport->lp_mutex);
 	fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-	fcoe_ctlr_link_down(&interface->ctlr);
+	fcoe_ctlr_link_down(ctlr);
 	fcoe_clean_pending_queue(lport);
 }
@@ -1804,6 +1871,7 @@ exit:
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
 {
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct fc_lport *lport;
 	int wait_cnt = 0;
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
 		return;
 	}
 
-	lport = interface->ctlr.lp;
+	lport = ctlr->lp;
 	BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
 
 	if (!bnx2fc_link_ok(lport) && interface->enabled) {
 		BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
-		fcoe_ctlr_link_up(&interface->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
 		set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
 	}
 
 	/* wait for the FCF to be selected before issuing FLOGI */
-	while (!interface->ctlr.sel_fcf) {
+	while (!ctlr->sel_fcf) {
 		msleep(250);
 		/* give up after 3 secs */
 		if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
 static int bnx2fc_disable(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = bnx2fc_to_ctlr(interface);
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
 	} else {
 		interface->enabled = false;
-		fcoe_ctlr_link_down(&interface->ctlr);
-		fcoe_clean_pending_queue(interface->ctlr.lp);
+		fcoe_ctlr_link_down(ctlr);
+		fcoe_clean_pending_queue(ctlr->lp);
 	}
 
 	mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
 static int bnx2fc_enable(struct net_device *netdev)
 {
 	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;
 	int rc = 0;
 
 	rtnl_lock();
 	mutex_lock(&bnx2fc_dev_lock);
 
 	interface = bnx2fc_interface_lookup(netdev);
-	if (!interface || !interface->ctlr.lp) {
+	ctlr = bnx2fc_to_ctlr(interface);
+	if (!interface || !ctlr->lp) {
 		rc = -ENODEV;
 		printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-	} else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
-		fcoe_ctlr_link_up(&interface->ctlr);
+	} else if (!bnx2fc_link_ok(ctlr->lp)) {
+		fcoe_ctlr_link_up(ctlr);
 		interface->enabled = true;
 	}
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
 */
 static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 {
+	struct fcoe_ctlr *ctlr;
 	struct bnx2fc_interface *interface;
 	struct bnx2fc_hba *hba;
 	struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 		goto ifput_err;
 	}
 
+	ctlr = bnx2fc_to_ctlr(interface);
 	interface->vlan_id = vlan_id;
 	interface->vlan_enabled = 1;
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 	lport->boot_time = jiffies;
 
 	/* Make this master N_port */
-	interface->ctlr.lp = lport;
+	ctlr->lp = lport;
 
 	if (!bnx2fc_link_ok(lport)) {
-		fcoe_ctlr_link_up(&interface->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
 		set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
 	}
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
 module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+	.get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+	.get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+	.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+	.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
 static struct fc_function_template bnx2fc_transport_function = {
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
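bnx2fc_fcoe_sysfs_templ above wires all six LESB getters to the single bnx2fc_ctlr_get_lesb(), which refreshes the entire cached LESB block in the fcoe_ctlr_device; each sysfs read then returns one cached counter (the show_ macros in fcoe_sysfs.c further down follow this refresh-then-read shape). A small user-space sketch of the idea, with invented names standing in for the real structures:

	#include <stdio.h>

	/* Cached stats plus refresh hooks, mirroring how fcoe_sysfs calls
	 * one driver callback before reading any cached LESB counter. */
	struct ctlr_dev {
		struct { unsigned link_fail, vlink_fail; } lesb;  /* cache */
		void (*get_link_fail)(struct ctlr_dev *);
		void (*get_vlink_fail)(struct ctlr_dev *);
	};

	/* One refresher serves every field: it repopulates the whole cache. */
	static void refresh_lesb(struct ctlr_dev *d)
	{
		d->lesb.link_fail  = 3;  /* would come from __fcoe_get_lesb() */
		d->lesb.vlink_fail = 1;
	}

	static unsigned show_link_fail(struct ctlr_dev *d)
	{
		if (d->get_link_fail)
			d->get_link_fail(d);     /* refresh, then read cache */
		return d->lesb.link_fail;
	}

	int main(void)
	{
		struct ctlr_dev d = { .get_link_fail  = refresh_lesb,
				      .get_vlink_fail = refresh_lesb };
		printf("link_fail=%u\n", show_link_fail(&d));
		return 0;
	}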
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 {
 	struct fc_lport *lport = port->lport;
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct kwqe *kwqe_arr[4];
 	struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 	ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
 	ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
 	ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
-	ofld_req4.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
+	ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
							/* fcf mac */
-	ofld_req4.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	ofld_req4.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	ofld_req4.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	ofld_req4.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	ofld_req4.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	ofld_req4.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+	ofld_req4.dst_mac_addr_mid[0] =  ctlr->dest_addr[3];
+	ofld_req4.dst_mac_addr_mid[1] =  ctlr->dest_addr[2];
+	ofld_req4.dst_mac_addr_hi[0] =  ctlr->dest_addr[1];
+	ofld_req4.dst_mac_addr_hi[1] =  ctlr->dest_addr[0];
 
 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 {
 	struct kwqe *kwqe_arr[2];
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fcoe_kwqe_conn_enable_disable enbl_req;
 	struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 	enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
 	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 
-	enbl_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-	enbl_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	enbl_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	enbl_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	enbl_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	enbl_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+	enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+	enbl_req.dst_mac_addr_mid[0] =  ctlr->dest_addr[3];
+	enbl_req.dst_mac_addr_mid[1] =  ctlr->dest_addr[2];
+	enbl_req.dst_mac_addr_hi[0] =  ctlr->dest_addr[1];
+	enbl_req.dst_mac_addr_hi[1] =  ctlr->dest_addr[0];
 
 	port_id = fc_host_port_id(lport->host);
 	if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
 {
 	struct bnx2fc_interface *interface = port->priv;
+	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 	struct bnx2fc_hba *hba = interface->hba;
 	struct fcoe_kwqe_conn_enable_disable disable_req;
 	struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
 	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
 	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-	disable_req.dst_mac_addr_lo[0] =  interface->ctlr.dest_addr[5];
-	disable_req.dst_mac_addr_lo[1] =  interface->ctlr.dest_addr[4];
-	disable_req.dst_mac_addr_mid[0] =  interface->ctlr.dest_addr[3];
-	disable_req.dst_mac_addr_mid[1] =  interface->ctlr.dest_addr[2];
-	disable_req.dst_mac_addr_hi[0] =  interface->ctlr.dest_addr[1];
-	disable_req.dst_mac_addr_hi[1] =  interface->ctlr.dest_addr[0];
+	disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
+	disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
+	disable_req.dst_mac_addr_mid[0] =  ctlr->dest_addr[3];
+	disable_req.dst_mac_addr_mid[1] =  ctlr->dest_addr[2];
+	disable_req.dst_mac_addr_hi[0] =  ctlr->dest_addr[1];
+	disable_req.dst_mac_addr_hi[1] =  ctlr->dest_addr[0];
 
 	port_id = tgt->sid;
 	disable_req.s_id[0] = (port_id & 0x000000FF);
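The offload/enable/disable requests above all hand the FCF MAC to the firmware as three byte pairs with the lowest wire bytes first, which is why dest_addr[5] lands in dst_mac_addr_lo[0]. A hedged helper sketch of that packing (hypothetical helper name; the real code writes the fields inline):

	#include <stdint.h>

	/* Pack a 6-byte MAC (mac[0] = most significant, as on the wire)
	 * into lo/mid/hi byte pairs the way the bnx2fc KWQEs expect:
	 * lo[0] gets mac[5], lo[1] gets mac[4], and so on. */
	static void pack_mac(const uint8_t mac[6],
			     uint8_t lo[2], uint8_t mid[2], uint8_t hi[2])
	{
		lo[0]  = mac[5];
		lo[1]  = mac[4];
		mid[0] = mac[3];
		mid[1] = mac[2];
		hi[0]  = mac[1];
		hi[1]  = mac[0];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };
		uint8_t lo[2], mid[2], hi[2];
		pack_mac(mac, lo, mid, hi);
		return lo[0] == 0x03 ? 0 : 1;  /* lowest wire byte first */
	}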
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
 	spin_lock_bh(&tgt->tgt_lock);
 	io_req->wait_for_comp = 0;
-	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
 		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+		if (io_req->on_tmf_queue) {
+			list_del_init(&io_req->link);
+			io_req->on_tmf_queue = 0;
+		}
+		io_req->wait_for_comp = 1;
+		bnx2fc_initiate_cleanup(io_req);
+		spin_unlock_bh(&tgt->tgt_lock);
+		rc = wait_for_completion_timeout(&io_req->tm_done,
+						 BNX2FC_FW_TIMEOUT);
+		spin_lock_bh(&tgt->tgt_lock);
+		io_req->wait_for_comp = 0;
+		if (!rc)
+			kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
 
 	spin_unlock_bh(&tgt->tgt_lock);
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	int logo_issued;
+	int rc = SUCCESS;
+	int wait_cnt = 0;
+
+	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+		      tgt->flags);
+	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+				       &tgt->flags);
+	io_req->wait_for_comp = 1;
+	bnx2fc_initiate_cleanup(io_req);
+
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	wait_for_completion(&io_req->tm_done);
+
+	io_req->wait_for_comp = 0;
+	/*
+	 * release the reference taken in eh_abort to allow the
+	 * target to re-login after flushing IOs
+	 */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+	if (!logo_issued) {
+		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+		mutex_lock(&lport->disc.disc_mutex);
+		lport->tt.rport_logoff(rdata);
+		mutex_unlock(&lport->disc.disc_mutex);
+		do {
+			msleep(BNX2FC_RELOGIN_WAIT_TIME);
+			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+				rc = FAILED;
+				break;
+			}
+		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+	}
+	spin_lock_bh(&tgt->tgt_lock);
+	return rc;
+}
 /**
  * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
  *			SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct bnx2fc_cmd *io_req;
 	struct fc_lport *lport;
-	struct fc_rport_priv *rdata;
 	struct bnx2fc_rport *tgt;
-	int logo_issued;
-	int wait_cnt = 0;
 	int rc = FAILED;
 
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
 	init_completion(&io_req->tm_done);
-	io_req->wait_for_comp = 1;
 
-	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
-		/* Cancel the current timer running on this io_req */
-		if (cancel_delayed_work(&io_req->timeout_work))
-			kref_put(&io_req->refcount,
-				 bnx2fc_cmd_release); /* drop timer hold */
-		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-		rc = bnx2fc_initiate_abts(io_req);
-	} else {
+	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"already in abts processing\n", io_req->xid);
 		if (cancel_delayed_work(&io_req->timeout_work))
 			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
-		bnx2fc_initiate_cleanup(io_req);
+		rc = bnx2fc_expl_logo(lport, io_req);
+		goto out;
+	}
 
+	/* Cancel the current timer running on this io_req */
+	if (cancel_delayed_work(&io_req->timeout_work))
+		kref_put(&io_req->refcount,
+			 bnx2fc_cmd_release); /* drop timer hold */
+	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+	io_req->wait_for_comp = 1;
+	rc = bnx2fc_initiate_abts(io_req);
+	if (rc == FAILED) {
+		bnx2fc_initiate_cleanup(io_req);
 		spin_unlock_bh(&tgt->tgt_lock);
-
 		wait_for_completion(&io_req->tm_done);
-
 		spin_lock_bh(&tgt->tgt_lock);
 		io_req->wait_for_comp = 0;
-		rdata = io_req->tgt->rdata;
-		logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
-					       &tgt->flags);
-		kref_put(&io_req->refcount, bnx2fc_cmd_release);
-		spin_unlock_bh(&tgt->tgt_lock);
-
-		if (!logo_issued) {
-			BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
-				      tgt->flags);
-			mutex_lock(&lport->disc.disc_mutex);
-			lport->tt.rport_logoff(rdata);
-			mutex_unlock(&lport->disc.disc_mutex);
-			do {
-				msleep(BNX2FC_RELOGIN_WAIT_TIME);
-				/*
-				 * If session not recovered, let SCSI-ml
-				 * escalate error recovery.
-				 */
-				if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
-					return FAILED;
-			} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
					   &tgt->flags));
-		}
-		return SUCCESS;
-	}
-	if (rc == FAILED) {
-		kref_put(&io_req->refcount, bnx2fc_cmd_release);
-		spin_unlock_bh(&tgt->tgt_lock);
-		return rc;
+		goto done;
 	}
 	spin_unlock_bh(&tgt->tgt_lock);
 
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 		/* Let the scsi-ml try to recover this command */
 		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
 		       io_req->xid);
-		rc = FAILED;
+		rc = bnx2fc_expl_logo(lport, io_req);
+		goto out;
 	} else {
 		/*
 		 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 		bnx2fc_scsi_done(io_req, DID_ABORT);
 		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 	}
-
+done:
 	/* release the reference taken in eh_abort */
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
 	spin_unlock_bh(&tgt->tgt_lock);
 	return rc;
 }
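Both bnx2fc_expl_logo() and the reworked eh_abort path above repeat one pattern: mark the request as waited-on, drop tgt_lock, block on io_req->tm_done, then retake the lock before touching the request again (the completion is fired by the flush paths in bnx2fc_tgt.c below). A compact sketch of that pattern, using pthreads as a stand-in for the kernel spinlock/completion primitives:

	#include <pthread.h>

	struct req {
		pthread_mutex_t *lock;   /* stands in for tgt->tgt_lock */
		pthread_cond_t done;     /* stands in for io_req->tm_done */
		int completed;
		int wait_for_comp;
	};

	/* Caller holds req->lock, as eh_abort holds tgt_lock. */
	void wait_for_cleanup(struct req *r)
	{
		r->wait_for_comp = 1;
		/* issue_cleanup(r) would go here */
		while (!r->completed)   /* cond_wait drops and retakes the lock, */
			pthread_cond_wait(&r->done, r->lock); /* like the unlock/wait/lock dance */
		r->wait_for_comp = 0;
	}

	/* Completion side, e.g. a flush path: called with req->lock held. */
	void complete_cleanup(struct req *r)
	{
		r->completed = 1;
		if (r->wait_for_comp)   /* only signal if someone is waiting, */
			pthread_cond_signal(&r->done); /* as the flush code checks wait_for_comp */
	}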
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 		BUG_ON(rc);
 	}
 
+	list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+		i++;
+		io_req = (struct bnx2fc_cmd *)list;
+		list_del_init(&io_req->link);
+		io_req->on_tmf_queue = 0;
+		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+		if (io_req->wait_for_comp)
+			complete(&io_req->tm_done);
+	}
+
 	list_for_each_safe(list, tmp, &tgt->els_queue) {
 		i++;
 		io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
 		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
 
-		if (cancel_delayed_work(&io_req->timeout_work))
+		if (cancel_delayed_work(&io_req->timeout_work)) {
+			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
+				/* Handle eh_abort timeout */
+				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "in retire_q\n");
+				if (io_req->wait_for_comp)
+					complete(&io_req->tm_done);
+			}
 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		}
 
 		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
 	}
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
 
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
 
 #include <scsi/fc/fc_encaps.h>
 #include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include <scsi/libfc.h>
 #include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+	.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+	.get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+	.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+	.get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
 	.frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
				struct net_device *netdev)
 {
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	struct netdev_hw_addr *ha;
 	struct net_device *real_dev;
 	u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
						    enum fip_state fip_mode)
 {
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
+	int size;
 	int err;
 
 	if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
 		goto out;
 	}
 
-	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
-	if (!fcoe) {
-		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+	size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
					size);
+	if (!ctlr_dev) {
+		FCOE_DBG("Failed to add fcoe_ctlr_device\n");
 		fcoe = ERR_PTR(-ENOMEM);
 		goto out_putmod;
 	}
 
+	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	fcoe = fcoe_ctlr_priv(ctlr);
+
 	dev_hold(netdev);
 
 	/*
	 * Initialize FIP.
	 */
-	fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
-	fcoe->ctlr.send = fcoe_fip_send;
-	fcoe->ctlr.update_mac = fcoe_update_src_mac;
-	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+	fcoe_ctlr_init(ctlr, fip_mode);
+	ctlr->send = fcoe_fip_send;
+	ctlr->update_mac = fcoe_update_src_mac;
+	ctlr->get_src_addr = fcoe_get_src_mac;
 
 	err = fcoe_interface_setup(fcoe, netdev);
 	if (err) {
-		fcoe_ctlr_destroy(&fcoe->ctlr);
-		kfree(fcoe);
+		fcoe_ctlr_destroy(ctlr);
+		fcoe_ctlr_device_delete(ctlr_dev);
 		dev_put(netdev);
 		fcoe = ERR_PTR(err);
 		goto out_putmod;
@@ -419,7 +443,7 @@ out:
 static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	u8 flogi_maddr[ETH_ALEN];
 	const struct net_device_ops *ops;
 
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
 
 	rtnl_lock();
 	if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 	/* Release the self-reference taken during fcoe_interface_create() */
 	/* tear-down the FCoE controller */
 	fcoe_ctlr_destroy(fip);
-	scsi_host_put(fcoe->ctlr.lp->host);
-	kfree(fcoe);
+	scsi_host_put(fip->lp->host);
+	fcoe_ctlr_device_delete(ctlr_dev);
 	dev_put(netdev);
 	module_put(THIS_MODULE);
 }
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
			 struct net_device *orig_dev)
 {
 	struct fcoe_interface *fcoe;
+	struct fcoe_ctlr *ctlr;
 
 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
-	fcoe_ctlr_recv(&fcoe->ctlr, skb);
+	ctlr = fcoe_to_ctlr(fcoe);
+	fcoe_ctlr_recv(ctlr, skb);
 	return 0;
 }
 
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 	u32 mfs;
 	u64 wwnn, wwpn;
 	struct fcoe_interface *fcoe;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 
 	/* Setup lport private data to point to fcoe softc */
 	port = lport_priv(lport);
 	fcoe = port->priv;
+	ctlr = fcoe_to_ctlr(fcoe);
 
 	/*
	 * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 
 	if (!lport->vport) {
 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
-			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+			wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
 		fc_set_wwnn(lport, wwnn);
 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
-			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+			wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
						 2, 0);
 		fc_set_wwpn(lport, wwpn);
 	}
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent, int npiv)
 {
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	struct net_device *netdev = fcoe->netdev;
 	struct fc_lport *lport, *n_port;
 	struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 	}
 
 	/* Initialize the library */
-	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+	rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 {
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 	unsigned int cpu;
 
 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
-	lport = fcoe->ctlr.lp;
+	ctlr = fcoe_to_ctlr(fcoe);
+	lport = ctlr->lp;
 	if (unlikely(!lport)) {
 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
 		goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
 	eh = eth_hdr(skb);
 
-	if (is_fip_mode(&fcoe->ctlr) &&
-	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+	if (is_fip_mode(ctlr) &&
+	    compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
				eh->h_source);
 		goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	unsigned int elen;		/* eth header, may include vlan */
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	u8 sof, eof;
 	struct fcoe_hdr *hp;
 
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	}
 
 	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
-	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+	    fcoe_ctlr_els_send(ctlr, lport, skb))
 		return 0;
 
 	sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
 	eh->h_proto = htons(ETH_P_FCOE);
-	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
-	if (fcoe->ctlr.map_dest)
+	memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+	if (ctlr->map_dest)
 		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
 
-	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+	if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
 	else
 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
 
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
 static inline int fcoe_filter_frames(struct fc_lport *lport,
				     struct fc_frame *fp)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 		return 0;
 
 	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
-	if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+	ctlr = fcoe_to_ctlr(fcoe);
+	if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
 	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
 		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
 		return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
				     ulong event, void *ptr)
 {
 	struct dcb_app_type *entry = ptr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct net_device *netdev;
 	struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 	if (!fcoe)
 		return NOTIFY_OK;
 
+	ctlr = fcoe_to_ctlr(fcoe);
+
 	if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
 		prio = ffs(entry->app.priority) - 1;
 	else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 
 	if (entry->app.protocol == ETH_P_FIP ||
 	    entry->app.protocol == ETH_P_FCOE)
-		fcoe->ctlr.priority = prio;
+		ctlr->priority = prio;
 
 	if (entry->app.protocol == ETH_P_FCOE) {
-		port = lport_priv(fcoe->ctlr.lp);
+		port = lport_priv(ctlr->lp);
 		port->priority = prio;
 	}
 
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 {
 	struct fc_lport *lport = NULL;
 	struct net_device *netdev = ptr;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fcoe_port *port;
 	struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
 		if (fcoe->netdev == netdev) {
-			lport = fcoe->ctlr.lp;
+			ctlr = fcoe_to_ctlr(fcoe);
+			lport = ctlr->lp;
 			break;
 		}
 	}
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		break;
 	case NETDEV_UNREGISTER:
 		list_del(&fcoe->list);
-		port = lport_priv(fcoe->ctlr.lp);
+		port = lport_priv(ctlr->lp);
 		queue_work(fcoe_wq, &port->destroy_work);
 		goto out;
 		break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	fcoe_link_speed_update(lport);
 
 	if (link_possible && !fcoe_link_ok(lport))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
-	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+		fcoe_ctlr_link_up(ctlr);
+	else if (fcoe_ctlr_link_down(ctlr)) {
 		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
 		stats->LinkFailureCount++;
 		put_cpu();
@@ -2003,6 +2043,7 @@ out:
 */
 static int fcoe_disable(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	int rc = 0;
 
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
 	rtnl_unlock();
 
 	if (fcoe) {
-		fcoe_ctlr_link_down(&fcoe->ctlr);
-		fcoe_clean_pending_queue(fcoe->ctlr.lp);
+		ctlr = fcoe_to_ctlr(fcoe);
+		fcoe_ctlr_link_down(ctlr);
+		fcoe_clean_pending_queue(ctlr->lp);
 	} else
 		rc = -ENODEV;
 
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
 */
 static int fcoe_enable(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	int rc = 0;
 
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
 	fcoe = fcoe_hostlist_lookup_port(netdev);
 	rtnl_unlock();
 
-	if (!fcoe)
+	if (!fcoe) {
 		rc = -ENODEV;
-	else if (!fcoe_link_ok(fcoe->ctlr.lp))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+		goto out;
+	}
+
+	ctlr = fcoe_to_ctlr(fcoe);
+
+	if (!fcoe_link_ok(ctlr->lp))
+		fcoe_ctlr_link_up(ctlr);
 
+out:
 	mutex_unlock(&fcoe_config_mutex);
 	return rc;
 }
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
 */
 static int fcoe_destroy(struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_lport *lport;
 	struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
 		rc = -ENODEV;
 		goto out_nodev;
 	}
-	lport = fcoe->ctlr.lp;
+	ctlr = fcoe_to_ctlr(fcoe);
+	lport = ctlr->lp;
 	port = lport_priv(lport);
 	list_del(&fcoe->list);
 	queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 	int dcbx;
 	u8 fup, up;
 	struct net_device *netdev = fcoe->realdev;
-	struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+	struct fcoe_port *port = lport_priv(ctlr->lp);
 	struct dcb_app app = {
				.priority = 0,
				.protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 	}
 
 	port->priority = ffs(up) ? ffs(up) - 1 : 0;
-	fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+	ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
 }
 #endif
 }
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 {
 	int rc = 0;
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 	struct fc_lport *lport;
 
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 		goto out_nodev;
 	}
 
-	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+	ctlr = fcoe_to_ctlr(fcoe);
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+	lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
 	if (IS_ERR(lport)) {
 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
 		       netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 	}
 
 	/* Make this the "master" N_Port */
-	fcoe->ctlr.lp = lport;
+	ctlr->lp = lport;
 
 	/* setup DCB priority attributes. */
 	fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
 	fc_fabric_login(lport);
 	if (!fcoe_link_ok(lport)) {
 		rtnl_unlock();
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+		fcoe_ctlr_link_up(ctlr);
 		mutex_unlock(&fcoe_config_mutex);
 		return rc;
 	}
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
 	struct fc_lport *lport = shost_priv(shost);
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
-	fcoe_ctlr_link_down(&fcoe->ctlr);
-	fcoe_clean_pending_queue(fcoe->ctlr.lp);
-	if (!fcoe_link_ok(fcoe->ctlr.lp))
-		fcoe_ctlr_link_up(&fcoe->ctlr);
+	fcoe_ctlr_link_down(ctlr);
+	fcoe_clean_pending_queue(ctlr->lp);
+	if (!fcoe_link_ok(ctlr->lp))
+		fcoe_ctlr_link_up(ctlr);
 	return 0;
 }
 
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
 */
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
 
 	fcoe = fcoe_hostlist_lookup_port(netdev);
-	return (fcoe) ? fcoe->ctlr.lp : NULL;
+	ctlr = fcoe_to_ctlr(fcoe);
+	return (fcoe) ? ctlr->lp : NULL;
 }
 
 /**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
 static void __exit fcoe_exit(void)
 {
 	struct fcoe_interface *fcoe, *tmp;
+	struct fcoe_ctlr *ctlr;
 	struct fcoe_port *port;
 	unsigned int cpu;
 
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
 	rtnl_lock();
 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
 		list_del(&fcoe->list);
-		port = lport_priv(fcoe->ctlr.lp);
+		ctlr = fcoe_to_ctlr(fcoe);
+		port = lport_priv(ctlr->lp);
 		queue_work(fcoe_wq, &port->destroy_work);
 	}
 	rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
-	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 
 	switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
 	__fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct net_device *netdev = fcoe_netdev(fip->lp);
+	struct fcoe_fc_els_lesb *fcoe_lesb;
+	struct fc_els_lesb fc_lesb;
+
+	__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+	fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+	ctlr_dev->lesb.lesb_link_fail =
+		ntohl(fcoe_lesb->lesb_link_fail);
+	ctlr_dev->lesb.lesb_vlink_fail =
+		ntohl(fcoe_lesb->lesb_vlink_fail);
+	ctlr_dev->lesb.lesb_miss_fka =
+		ntohl(fcoe_lesb->lesb_miss_fka);
+	ctlr_dev->lesb.lesb_symb_err =
+		ntohl(fcoe_lesb->lesb_symb_err);
+	ctlr_dev->lesb.lesb_err_block =
+		ntohl(fcoe_lesb->lesb_err_block);
+	ctlr_dev->lesb.lesb_fcs_error =
+		ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev =
+		fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+	fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
 /**
  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
  * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 
 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
-		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+		fcoe_ctlr_recv_flogi(ctlr, lport, fp);
 }
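fcoe_ctlr_get_lesb() above (like its bnx2fc twin) copies the Link Error Status Block out of the wire-format ELS structure, where every counter is big-endian, into the host-endian cache hanging off the fcoe_ctlr_device. A minimal sketch of that endian fix-up, with invented struct names:

	#include <arpa/inet.h>
	#include <stdint.h>

	struct wire_lesb { uint32_t link_fail; }; /* big-endian, as in the ELS payload */
	struct host_lesb { uint32_t link_fail; }; /* host-endian sysfs cache */

	static void cache_lesb(const struct wire_lesb *w, struct host_lesb *h)
	{
		/* ntohl() makes the cached value printable with %u
		 * regardless of host byte order. */
		h->link_fail = ntohl(w->link_fail);
	}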
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
 * @netdev:	      The associated net device
 * @fcoe_packet_type: FCoE packet type
 * @fip_packet_type:  FIP packet type
- * @ctlr:	      The FCoE controller (for FIP)
 * @oem:	      The offload exchange manager for all local port
 *		      instances associated with this port
 * @removed:	      Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
 	struct net_device  *realdev;
 	struct packet_type fcoe_packet_type;
 	struct packet_type fip_packet_type;
-	struct fcoe_ctlr   ctlr;
 	struct fc_exch_mgr *oem;
 	u8	removed;
 };
 
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x)						\
+	(struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x)			\
+	((struct fcoe_interface *)((x) + 1))
 
 /**
 * fcoe_netdev() - Return the net device associated with a local port
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+	struct fcoe_ctlr *fip = new->fip;
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_fcf_device temp, *fcf_dev;
+	int rc = 0;
+
+	LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+			new->fabric_name, new->fcf_mac);
+
+	mutex_lock(&ctlr_dev->lock);
+
+	temp.fabric_name = new->fabric_name;
+	temp.switch_name = new->switch_name;
+	temp.fc_map = new->fc_map;
+	temp.vfid = new->vfid;
+	memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+	temp.priority = new->pri;
+	temp.fka_period = new->fka_period;
+	temp.selected = 0; /* default to unselected */
+
+	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+	if (unlikely(!fcf_dev)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * The fcoe_sysfs layer can return a CONNECTED fcf that
+	 * has a priv (fcf was never deleted) or a CONNECTED fcf
+	 * that doesn't have a priv (fcf was deleted). However,
+	 * libfcoe will always delete FCFs before trying to add
+	 * them. This is ensured because both recv_adv and
+	 * age_fcfs are protected by the the fcoe_ctlr's mutex.
+	 * This means that we should never get a FCF with a
+	 * non-NULL priv pointer.
+	 */
+	BUG_ON(fcf_dev->priv);
+
+	fcf_dev->priv = new;
+	new->fcf_dev = fcf_dev;
+
+	list_add(&new->list, &fip->fcfs);
+	fip->fcf_count++;
+
+out:
+	mutex_unlock(&ctlr_dev->lock);
+	return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+	struct fcoe_ctlr *fip = new->fip;
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_fcf_device *fcf_dev;
+
+	list_del(&new->list);
+	fip->fcf_count--;
+
+	mutex_lock(&ctlr_dev->lock);
+
+	fcf_dev = fcoe_fcf_to_fcf_dev(new);
+	WARN_ON(!fcf_dev);
+	new->fcf_dev = NULL;
+	fcoe_fcf_device_delete(fcf_dev);
+	kfree(new);
+
+	mutex_unlock(&ctlr_dev->lock);
+}
+
 /**
 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
 * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 
 	fip->sel_fcf = NULL;
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
-		list_del(&fcf->list);
-		kfree(fcf);
+		fcoe_sysfs_fcf_del(fcf);
 	}
-	fip->fcf_count = 0;
+	WARN_ON(fip->fcf_count);
+
 	fip->sel_time = 0;
 }
 
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 	unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
 	unsigned long deadline;
 	unsigned long sel_time = 0;
+	struct list_head del_list;
 	struct fcoe_dev_stats *stats;
 
+	INIT_LIST_HEAD(&del_list);
+
 	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
 
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 		if (time_after_eq(jiffies, deadline)) {
 			if (fip->sel_fcf == fcf)
				fip->sel_fcf = NULL;
+			/*
+			 * Move to delete list so we can call
+			 * fcoe_sysfs_fcf_del (which can sleep)
+			 * after the put_cpu().
+			 */
 			list_del(&fcf->list);
-			WARN_ON(!fip->fcf_count);
-			fip->fcf_count--;
-			kfree(fcf);
+			list_add(&fcf->list, &del_list);
 			stats->VLinkFailureCount++;
 		} else {
 			if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 		}
 	}
 	put_cpu();
+
+	list_for_each_entry_safe(fcf, next, &del_list, list) {
+		/* Removes fcf from current list */
+		fcoe_sysfs_fcf_del(fcf);
+	}
+
 	if (sel_time && !fip->sel_fcf && !fip->sel_time) {
 		sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
 		fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct fcoe_fcf *fcf;
 	struct fcoe_fcf new;
-	struct fcoe_fcf *found;
 	unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
 	int first = 0;
 	int mtu_valid;
+	int found = 0;
+	int rc = 0;
 
 	if (fcoe_ctlr_parse_adv(fip, skb, &new))
 		return;
 
 	mutex_lock(&fip->ctlr_mutex);
 	first = list_empty(&fip->fcfs);
-	found = NULL;
 	list_for_each_entry(fcf, &fip->fcfs, list) {
 		if (fcf->switch_name == new.switch_name &&
 		    fcf->fabric_name == new.fabric_name &&
 		    fcf->fc_map == new.fc_map &&
 		    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
-			found = fcf;
+			found = 1;
 			break;
 		}
 	}
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		if (!fcf)
 			goto out;
 
-		fip->fcf_count++;
 		memcpy(fcf, &new, sizeof(new));
-		list_add(&fcf->list, &fip->fcfs);
+		fcf->fip = fip;
+		rc = fcoe_sysfs_fcf_add(fcf);
+		if (rc) {
+			printk(KERN_ERR "Failed to allocate sysfs instance "
+			       "for FCF, fab %16.16llx mac %pM\n",
+			       new.fabric_name, new.fcf_mac);
+			kfree(fcf);
+			goto out;
+		}
 	} else {
 		/*
		 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		fcf->fka_period = new.fka_period;
 		memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
 	}
+
 	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
 	fcf->time = jiffies;
 	if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		    time_before(fip->sel_time, fip->timer.expires))
 			mod_timer(&fip->timer, fip->sel_time);
 	}
+
 out:
 	mutex_unlock(&fip->ctlr_mutex);
 }
@@ -2718,9 +2809,9 @@ unlock:
 
 /**
 * fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport: The local port to configure libfc for
+ * @fip: The FCoE controller in use by the local port
+ * @tt: The libfc function template
 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
 *
 * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+	struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+	struct fcoe_fcf *fcf;
+
+	mutex_lock(&fip->ctlr_mutex);
+	mutex_lock(&ctlr_dev->lock);
+
+	fcf = fcoe_fcf_device_priv(fcf_dev);
+	if (fcf)
+		fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+	else
+		fcf_dev->selected = 0;
+
+	mutex_unlock(&ctlr_dev->lock);
+	mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	mutex_lock(&ctlr->ctlr_mutex);
+	switch (ctlr->mode) {
+	case FIP_MODE_FABRIC:
+		ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+		break;
+	case FIP_MODE_VN2VN:
+		ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+		break;
+	default:
+		ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+		break;
+	}
+	mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
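fcoe_ctlr_age_fcfs() above can no longer kfree() an aged-out FCF on the spot: fcoe_sysfs_fcf_del() takes a mutex and can sleep, which is not allowed between get_cpu() and put_cpu(). The fix is the classic defer-to-local-list shape: unlink entries onto a stack-local list inside the atomic section, then walk that private list and do the sleeping teardown afterwards. Sketched with a trivial singly-linked list standing in for list_head (illustrative names only):

	#include <stdlib.h>

	struct fcf {
		struct fcf *next;
		int expired;
	};

	/* Phase 1: runs in the "atomic" section - only unlinks, never sleeps. */
	static struct fcf *collect_expired(struct fcf **head)
	{
		struct fcf *del = NULL, **pp = head;
		while (*pp) {
			struct fcf *f = *pp;
			if (f->expired) {
				*pp = f->next;  /* unlink from the live list */
				f->next = del;  /* push onto the private del list */
				del = f;
			} else {
				pp = &f->next;
			}
		}
		return del;
	}

	/* Phase 2: after put_cpu()/unlock - free to sleep per entry. */
	static void reap(struct fcf *del)
	{
		while (del) {
			struct fcf *next = del->next;
			/* sleeping teardown (the sysfs delete) would go here */
			free(del);
			del = next;
		}
	}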
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) fcf->fka_period = new.fka_period; memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); } + mtu_valid = fcoe_ctlr_mtu_valid(fcf); fcf->time = jiffies; if (!found) @@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) time_before(fip->sel_time, fip->timer.expires)) mod_timer(&fip->timer, fip->sel_time); } + out: mutex_unlock(&fip->ctlr_mutex); } @@ -2718,9 +2809,9 @@ unlock: /** * fcoe_libfc_config() - Sets up libfc related properties for local port - * @lp: The local port to configure libfc for - * @fip: The FCoE controller in use by the local port - * @tt: The libfc function template + * @lport: The local port to configure libfc for + * @fip: The FCoE controller in use by the local port + * @tt: The libfc function template * @init_fcp: If non-zero, the FCP portion of libfc should be initialized * * Returns : 0 for success @@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); + +void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev) +{ + struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_fcf *fcf; + + mutex_lock(&fip->ctlr_mutex); + mutex_lock(&ctlr_dev->lock); + + fcf = fcoe_fcf_device_priv(fcf_dev); + if (fcf) + fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0; + else + fcf_dev->selected = 0; + + mutex_unlock(&ctlr_dev->lock); + mutex_unlock(&fip->ctlr_mutex); +} +EXPORT_SYMBOL(fcoe_fcf_get_selected); + +void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + + mutex_lock(&ctlr->ctlr_mutex); + switch (ctlr->mode) { + case FIP_MODE_FABRIC: + ctlr_dev->mode = FIP_CONN_TYPE_FABRIC; + break; + case FIP_MODE_VN2VN: + ctlr_dev->mode = FIP_CONN_TYPE_VN2VN; + break; + default: + ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN; + break; + } + mutex_unlock(&ctlr->ctlr_mutex); +} +EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c new file mode 100644 index 000000000000..2bc163198d33 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -0,0 +1,832 @@ +/* + * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/etherdevice.h> + +#include <scsi/fcoe_sysfs.h> + +static atomic_t ctlr_num; +static atomic_t fcf_num; + +/* + * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs + * should insulate the loss of a fcf. 
+ */ +static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */ + +module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo, + uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fcf_dev_loss_tmo, + "Maximum number of seconds that libfcoe should" + " insulate the loss of a fcf. Once this value is" + " exceeded, the fcf is removed."); + +/* + * These are used by the fcoe_*_show_function routines, they + * are intentionally placed in the .c file as they're not intended + * for use throughout the code. + */ +#define fcoe_ctlr_id(x) \ + ((x)->id) +#define fcoe_ctlr_work_q_name(x) \ + ((x)->work_q_name) +#define fcoe_ctlr_work_q(x) \ + ((x)->work_q) +#define fcoe_ctlr_devloss_work_q_name(x) \ + ((x)->devloss_work_q_name) +#define fcoe_ctlr_devloss_work_q(x) \ + ((x)->devloss_work_q) +#define fcoe_ctlr_mode(x) \ + ((x)->mode) +#define fcoe_ctlr_fcf_dev_loss_tmo(x) \ + ((x)->fcf_dev_loss_tmo) +#define fcoe_ctlr_link_fail(x) \ + ((x)->lesb.lesb_link_fail) +#define fcoe_ctlr_vlink_fail(x) \ + ((x)->lesb.lesb_vlink_fail) +#define fcoe_ctlr_miss_fka(x) \ + ((x)->lesb.lesb_miss_fka) +#define fcoe_ctlr_symb_err(x) \ + ((x)->lesb.lesb_symb_err) +#define fcoe_ctlr_err_block(x) \ + ((x)->lesb.lesb_err_block) +#define fcoe_ctlr_fcs_error(x) \ + ((x)->lesb.lesb_fcs_error) +#define fcoe_fcf_state(x) \ + ((x)->state) +#define fcoe_fcf_fabric_name(x) \ + ((x)->fabric_name) +#define fcoe_fcf_switch_name(x) \ + ((x)->switch_name) +#define fcoe_fcf_fc_map(x) \ + ((x)->fc_map) +#define fcoe_fcf_vfid(x) \ + ((x)->vfid) +#define fcoe_fcf_mac(x) \ + ((x)->mac) +#define fcoe_fcf_priority(x) \ + ((x)->priority) +#define fcoe_fcf_fka_period(x) \ + ((x)->fka_period) +#define fcoe_fcf_dev_loss_tmo(x) \ + ((x)->dev_loss_tmo) +#define fcoe_fcf_selected(x) \ + ((x)->selected) +#define fcoe_fcf_vlan_id(x) \ + ((x)->vlan_id) + +/* + * dev_loss_tmo attribute + */ +static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val) +{ + int ret; + + ret = kstrtoul(buf, 0, val); + if (ret || *val < 0) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (*val > UINT_MAX) + return -EINVAL; + + return 0; +} + +static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf, + unsigned long val) +{ + if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) || + (fcf->state == FCOE_FCF_STATE_DISCONNECTED) || + (fcf->state == FCOE_FCF_STATE_DELETED)) + return -EBUSY; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (val > UINT_MAX) + return -EINVAL; + + fcoe_fcf_dev_loss_tmo(fcf) = val; + return 0; +} + +#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute device_attr_fcoe_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +#define fcoe_ctlr_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ + if (ctlr->f->get_fcoe_ctlr_##field) \ + ctlr->f->get_fcoe_ctlr_##field(ctlr); \ + return snprintf(buf, sz, format_string, \ + cast fcoe_ctlr_##field(ctlr)); \ +} + +#define fcoe_fcf_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \ + if (ctlr->f->get_fcoe_fcf_##field) \ + ctlr->f->get_fcoe_fcf_##field(fcf); \ + return snprintf(buf, sz, format_string, \ + cast 
fcoe_fcf_##field(fcf)); \ +} + +#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ + return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \ +} + +#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \ +static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ + return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \ +} + +#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \ + fcoe_ctlr_private_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_ctlr_rd_attr(field, format_string, sz) \ + fcoe_ctlr_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_fcf_rd_attr(field, format_string, sz) \ + fcoe_fcf_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_fcf_private_rd_attr(field, format_string, sz) \ + fcoe_fcf_private_show_function(field, format_string, sz, ) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \ + fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \ + static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ + show_fcoe_ctlr_device_##field, NULL) + +#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \ + fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \ + static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ + show_fcoe_fcf_device_##field, NULL) + +#define fcoe_enum_name_search(title, table_type, table) \ +static const char *get_fcoe_##title##_name(enum table_type table_key) \ +{ \ + int i; \ + char *name = NULL; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value == table_key) { \ + name = table[i].name; \ + break; \ + } \ + } \ + return name; \ +} + +static struct { + enum fcf_state value; + char *name; +} fcf_state_names[] = { + { FCOE_FCF_STATE_UNKNOWN, "Unknown" }, + { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" }, + { FCOE_FCF_STATE_CONNECTED, "Connected" }, +}; +fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names) +#define FCOE_FCF_STATE_MAX_NAMELEN 50 + +static ssize_t show_fcf_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + const char *name; + name = get_fcoe_fcf_state_name(fcf->state); + if (!name) + return -EINVAL; + return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name); +} +static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL); + +static struct { + enum fip_conn_type value; + char *name; +} fip_conn_type_names[] = { + { FIP_CONN_TYPE_UNKNOWN, "Unknown" }, + { FIP_CONN_TYPE_FABRIC, "Fabric" }, + { FIP_CONN_TYPE_VN2VN, "VN2VN" }, +}; +fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) +#define FCOE_CTLR_MODE_MAX_NAMELEN 50 + +static ssize_t show_ctlr_mode(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + const char *name; + + if (ctlr->f->get_fcoe_ctlr_mode) + 
ctlr->f->get_fcoe_ctlr_mode(ctlr); + + name = get_fcoe_ctlr_mode_name(ctlr->mode); + if (!name) + return -EINVAL; + return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN, + "%s\n", name); +} +static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO, + show_ctlr_mode, NULL); + +static ssize_t +store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_fcf_device *fcf; + unsigned long val; + int rc; + + rc = fcoe_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val; + mutex_lock(&ctlr->lock); + list_for_each_entry(fcf, &ctlr->fcfs, peers) + fcoe_fcf_set_dev_loss_tmo(fcf, val); + mutex_unlock(&ctlr->lock); + return count; +} +fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, ); +static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fcoe_ctlr_device_fcf_dev_loss_tmo, + store_private_fcoe_ctlr_fcf_dev_loss_tmo); + +/* Link Error Status Block (LESB) */ +fcoe_ctlr_rd_attr(link_fail, "%u\n", 20); +fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20); +fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20); +fcoe_ctlr_rd_attr(symb_err, "%u\n", 20); +fcoe_ctlr_rd_attr(err_block, "%u\n", 20); +fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20); + +fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); +fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long); +fcoe_fcf_private_rd_attr(priority, "%u\n", 20); +fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20); +fcoe_fcf_private_rd_attr(vfid, "%u\n", 20); +fcoe_fcf_private_rd_attr(mac, "%pM\n", 20); +fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20); +fcoe_fcf_rd_attr(selected, "%u\n", 20); +fcoe_fcf_rd_attr(vlan_id, "%u\n", 20); + +fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, ) +static ssize_t +store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + unsigned long val; + int rc; + + rc = fcoe_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + rc = fcoe_fcf_set_dev_loss_tmo(fcf, val); + if (rc) + return rc; + return count; +} +static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fcoe_fcf_device_dev_loss_tmo, + store_fcoe_fcf_dev_loss_tmo); + +static struct attribute *fcoe_ctlr_lesb_attrs[] = { + &device_attr_fcoe_ctlr_link_fail.attr, + &device_attr_fcoe_ctlr_vlink_fail.attr, + &device_attr_fcoe_ctlr_miss_fka.attr, + &device_attr_fcoe_ctlr_symb_err.attr, + &device_attr_fcoe_ctlr_err_block.attr, + &device_attr_fcoe_ctlr_fcs_error.attr, + NULL, +}; + +static struct attribute_group fcoe_ctlr_lesb_attr_group = { + .name = "lesb", + .attrs = fcoe_ctlr_lesb_attrs, +}; + +static struct attribute *fcoe_ctlr_attrs[] = { + &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, + &device_attr_fcoe_ctlr_mode.attr, + NULL, +}; + +static struct attribute_group fcoe_ctlr_attr_group = { + .attrs = fcoe_ctlr_attrs, +}; + +static const struct attribute_group *fcoe_ctlr_attr_groups[] = { + &fcoe_ctlr_attr_group, + &fcoe_ctlr_lesb_attr_group, + NULL, +}; + +static struct attribute *fcoe_fcf_attrs[] = { + &device_attr_fcoe_fcf_fabric_name.attr, + &device_attr_fcoe_fcf_switch_name.attr, + &device_attr_fcoe_fcf_dev_loss_tmo.attr, + &device_attr_fcoe_fcf_fc_map.attr, + &device_attr_fcoe_fcf_vfid.attr, + &device_attr_fcoe_fcf_mac.attr, + &device_attr_fcoe_fcf_priority.attr, + &device_attr_fcoe_fcf_fka_period.attr, + &device_attr_fcoe_fcf_state.attr, + 
&device_attr_fcoe_fcf_selected.attr, + &device_attr_fcoe_fcf_vlan_id.attr, + NULL +}; + +static struct attribute_group fcoe_fcf_attr_group = { + .attrs = fcoe_fcf_attrs, +}; + +static const struct attribute_group *fcoe_fcf_attr_groups[] = { + &fcoe_fcf_attr_group, + NULL, +}; + +struct bus_type fcoe_bus_type; + +static int fcoe_bus_match(struct device *dev, + struct device_driver *drv) +{ + if (dev->bus == &fcoe_bus_type) + return 1; + return 0; +} + +/** + * fcoe_ctlr_device_release() - Release the FIP ctlr memory + * @dev: Pointer to the FIP ctlr's embedded device + * + * Called when the last FIP ctlr reference is released. + */ +static void fcoe_ctlr_device_release(struct device *dev) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + kfree(ctlr); +} + +/** + * fcoe_fcf_device_release() - Release the FIP fcf memory + * @dev: Pointer to the fcf's embedded device + * + * Called when the last FIP fcf reference is released. + */ +static void fcoe_fcf_device_release(struct device *dev) +{ + struct fcoe_fcf_device *fcf = dev_to_fcf(dev); + kfree(fcf); +} + +struct device_type fcoe_ctlr_device_type = { + .name = "fcoe_ctlr", + .groups = fcoe_ctlr_attr_groups, + .release = fcoe_ctlr_device_release, +}; + +struct device_type fcoe_fcf_device_type = { + .name = "fcoe_fcf", + .groups = fcoe_fcf_attr_groups, + .release = fcoe_fcf_device_release, +}; + +struct bus_type fcoe_bus_type = { + .name = "fcoe", + .match = &fcoe_bus_match, +}; + +/** + * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue + * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed + */ +void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) +{ + if (!fcoe_ctlr_work_q(ctlr)) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to flush work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + return; + } + + flush_workqueue(fcoe_ctlr_work_q(ctlr)); +} + +/** + * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue + * @ctlr: Pointer to the FIP ctlr that owns the workqueue + * @work: Work to queue for execution + * + * Return value: + * 1 on success / 0 already queued / < 0 for error + */ +int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, + struct work_struct *work) +{ + if (unlikely(!fcoe_ctlr_work_q(ctlr))) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to queue work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + + return -EINVAL; + } + + return queue_work(fcoe_ctlr_work_q(ctlr), work); +} + +/** + * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue + * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed + */ +void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) +{ + if (!fcoe_ctlr_devloss_work_q(ctlr)) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to flush work, " + "when no workqueue created.\n", ctlr->id); + dump_stack(); + return; + } + + flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr)); +} + +/** + * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue + * @ctlr: Pointer to the FIP ctlr that owns the devloss workqueue + * @work: Work to queue for execution + * @delay: jiffies to delay the work queuing + * + * Return value: + * 1 on success / 0 already queued / < 0 for error + */ +int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, + struct delayed_work *work, + unsigned long delay) +{ + if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { + printk(KERN_ERR + "ERROR: FIP Ctlr '%d' attempted to queue work, " +
"when no workqueue created.\n", ctlr->id); + dump_stack(); + + return -EINVAL; + } + + return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); +} + +static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, + struct fcoe_fcf_device *old) +{ + if (new->switch_name == old->switch_name && + new->fabric_name == old->fabric_name && + new->fc_map == old->fc_map && + compare_ether_addr(new->mac, old->mac) == 0) + return 1; + return 0; +} + +/** + * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs + * @parent: The parent device to which the fcoe_ctlr instance + * should be attached + * @f: The LLD's FCoE sysfs function template pointer + * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD + * + * This routine allocates a FIP ctlr object with some additional memory + * for the LLD. The FIP ctlr is initialized, added to sysfs and then + * attributes are added to it. + */ +struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, + struct fcoe_sysfs_function_template *f, + int priv_size) +{ + struct fcoe_ctlr_device *ctlr; + int error = 0; + + ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size, + GFP_KERNEL); + if (!ctlr) + goto out; + + ctlr->id = atomic_inc_return(&ctlr_num) - 1; + ctlr->f = f; + INIT_LIST_HEAD(&ctlr->fcfs); + mutex_init(&ctlr->lock); + ctlr->dev.parent = parent; + ctlr->dev.bus = &fcoe_bus_type; + ctlr->dev.type = &fcoe_ctlr_device_type; + + ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; + + snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), + "ctlr_wq_%d", ctlr->id); + ctlr->work_q = create_singlethread_workqueue( + ctlr->work_q_name); + if (!ctlr->work_q) + goto out_del; + + snprintf(ctlr->devloss_work_q_name, + sizeof(ctlr->devloss_work_q_name), + "ctlr_dl_wq_%d", ctlr->id); + ctlr->devloss_work_q = create_singlethread_workqueue( + ctlr->devloss_work_q_name); + if (!ctlr->devloss_work_q) + goto out_del_q; + + dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); + error = device_register(&ctlr->dev); + if (error) + goto out_del_q2; + + return ctlr; + +out_del_q2: + destroy_workqueue(ctlr->devloss_work_q); + ctlr->devloss_work_q = NULL; +out_del_q: + destroy_workqueue(ctlr->work_q); + ctlr->work_q = NULL; +out_del: + kfree(ctlr); +out: + return NULL; +} +EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add); + +/** + * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs + * @ctlr: A pointer to the ctlr to be deleted + * + * Deletes a FIP ctlr and any fcfs attached + * to it. Deleting fcfs will cause their childen + * to be deleted as well. + * + * The ctlr is detached from sysfs and it's resources + * are freed (work q), but the memory is not freed + * until its last reference is released. + * + * This routine expects no locks to be held before + * calling. + * + * TODO: Currently there are no callbacks to clean up LLD data + * for a fcoe_fcf_device. LLDs must keep this in mind as they need + * to clean up each of their LLD data for all fcoe_fcf_device before + * calling fcoe_ctlr_device_delete. 
+ */ +void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr) +{ + struct fcoe_fcf_device *fcf, *next; + /* Remove any attached fcfs */ + mutex_lock(&ctlr->lock); + list_for_each_entry_safe(fcf, next, + &ctlr->fcfs, peers) { + list_del(&fcf->peers); + fcf->state = FCOE_FCF_STATE_DELETED; + fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); + } + mutex_unlock(&ctlr->lock); + + fcoe_ctlr_device_flush_work(ctlr); + + destroy_workqueue(ctlr->devloss_work_q); + ctlr->devloss_work_q = NULL; + destroy_workqueue(ctlr->work_q); + ctlr->work_q = NULL; + + device_unregister(&ctlr->dev); +} +EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete); + +/** + * fcoe_fcf_device_final_delete() - Final delete routine + * @work: The FIP fcf's embedded work struct + * + * It is expected that the fcf has been removed from + * the FIP ctlr's list before calling this routine. + */ +static void fcoe_fcf_device_final_delete(struct work_struct *work) +{ + struct fcoe_fcf_device *fcf = + container_of(work, struct fcoe_fcf_device, delete_work); + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + + /* + * Cancel any outstanding timers. These should really exist + * only when rmmod'ing the LLDD and we're asking for + * immediate termination of the rports + */ + if (!cancel_delayed_work(&fcf->dev_loss_work)) + fcoe_ctlr_device_flush_devloss(ctlr); + + device_unregister(&fcf->dev); +} + +/** + * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires + * @work: The FIP fcf's embedded work struct + * + * Removes the fcf from the FIP ctlr's list of fcfs and + * queues the final deletion. + */ +static void fip_timeout_deleted_fcf(struct work_struct *work) +{ + struct fcoe_fcf_device *fcf = + container_of(work, struct fcoe_fcf_device, dev_loss_work.work); + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + + mutex_lock(&ctlr->lock); + + /* + * If the fcf is deleted or reconnected before the timer + * fires the devloss queue will be flushed, but the state will + * either be CONNECTED or DELETED. If that is the case we + * cancel deleting the fcf. + */ + if (fcf->state != FCOE_FCF_STATE_DISCONNECTED) + goto out; + + dev_printk(KERN_ERR, &fcf->dev, + "FIP fcf connection time out: removing fcf\n"); + + list_del(&fcf->peers); + fcf->state = FCOE_FCF_STATE_DELETED; + fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); + +out: + mutex_unlock(&ctlr->lock); +} + +/** + * fcoe_fcf_device_delete() - Delete a FIP fcf + * @fcf: Pointer to the fcf which is to be deleted + * + * Queues the FIP fcf on the devloss workqueue + * + * Expects the ctlr_attrs mutex to be held for fcf + * state change. + */ +void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf) +{ + struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); + int timeout = fcf->dev_loss_tmo; + + if (fcf->state != FCOE_FCF_STATE_CONNECTED) + return; + + fcf->state = FCOE_FCF_STATE_DISCONNECTED; + + /* + * FCF will only be re-connected by the LLD calling + * fcoe_fcf_device_add, and it should be setting up + * priv then. 
+ */ + fcf->priv = NULL; + + fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work, + timeout * HZ); +} +EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete); + +/** + * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system + * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent + * @new_fcf: A temporary FCF used for lookups on the current list of fcfs + * + * Expects to be called with the ctlr->lock held + */ +struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, + struct fcoe_fcf_device *new_fcf) +{ + struct fcoe_fcf_device *fcf; + int error = 0; + + list_for_each_entry(fcf, &ctlr->fcfs, peers) { + if (fcoe_fcf_device_match(new_fcf, fcf)) { + if (fcf->state == FCOE_FCF_STATE_CONNECTED) + return fcf; + + fcf->state = FCOE_FCF_STATE_CONNECTED; + + if (!cancel_delayed_work(&fcf->dev_loss_work)) + fcoe_ctlr_device_flush_devloss(ctlr); + + return fcf; + } + } + + fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC); + if (unlikely(!fcf)) + goto out; + + INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete); + INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf); + + fcf->dev.parent = &ctlr->dev; + fcf->dev.bus = &fcoe_bus_type; + fcf->dev.type = &fcoe_fcf_device_type; + fcf->id = atomic_inc_return(&fcf_num) - 1; + fcf->state = FCOE_FCF_STATE_UNKNOWN; + + fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; + + dev_set_name(&fcf->dev, "fcf_%d", fcf->id); + + fcf->fabric_name = new_fcf->fabric_name; + fcf->switch_name = new_fcf->switch_name; + fcf->fc_map = new_fcf->fc_map; + fcf->vfid = new_fcf->vfid; + memcpy(fcf->mac, new_fcf->mac, ETH_ALEN); + fcf->priority = new_fcf->priority; + fcf->fka_period = new_fcf->fka_period; + fcf->selected = new_fcf->selected; + + error = device_register(&fcf->dev); + if (error) + goto out_del; + + fcf->state = FCOE_FCF_STATE_CONNECTED; + list_add_tail(&fcf->peers, &ctlr->fcfs); + + return fcf; + +out_del: + kfree(fcf); +out: + return NULL; +} +EXPORT_SYMBOL_GPL(fcoe_fcf_device_add); + +int __init fcoe_sysfs_setup(void) +{ + int error; + + atomic_set(&ctlr_num, 0); + atomic_set(&fcf_num, 0); + + error = bus_register(&fcoe_bus_type); + if (error) + return error; + + return 0; +} + +void __exit fcoe_sysfs_teardown(void) +{ + bus_unregister(&fcoe_bus_type); +} diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 710e149d41b6..b46f43dced78 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -815,9 +815,17 @@ out_nodev: */ static int __init libfcoe_init(void) { - fcoe_transport_init(); + int rc = 0; - return 0; + rc = fcoe_transport_init(); + if (rc) + return rc; + + rc = fcoe_sysfs_setup(); + if (rc) + fcoe_transport_exit(); + + return rc; } module_init(libfcoe_init); @@ -826,6 +834,7 @@ module_init(libfcoe_init); */ static void __exit libfcoe_exit(void) { + fcoe_sysfs_teardown(); fcoe_transport_exit(); } module_exit(libfcoe_exit); diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 6208d562890d..317a7fdc3b82 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -25,3 +25,12 @@ config SCSI_QLA_FC Firmware images can be retrieved from: ftp://ftp.qlogic.com/outgoing/linux/firmware/ + +config TCM_QLA2XXX + tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" + depends on SCSI_QLA_FC && TARGET_CORE + select LIBFC + select BTREE + default n + ---help--- + Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs diff --git 
a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index 5df782f4a097..dce7d788cdc9 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,5 +1,6 @@ qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ - qla_nx.o + qla_nx.o qla_target.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o +obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 5926f5a87ea8..5ab953029f8d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -5,6 +5,7 @@ * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include "qla_target.h" #include <linux/kthread.h> #include <linux/vmalloc.h> @@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, scsi_block_requests(vha->host); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); if (IS_QLA82XX(ha)) { + ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); @@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, scsi_unblock_requests(vha->host); break; case 0x2025d: - if (!IS_QLA81XX(ha)) + if (!IS_QLA81XX(ha) || !IS_QLA8031(ha)) return -EPERM; ql_log(ql_log_info, vha, 0x706f, @@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - struct qla_hw_data *ha = vha->hw; return snprintf(buf, PAGE_SIZE, "%d\n", - ha->qla_stats.total_isp_aborts); + vha->qla_stats.total_isp_aborts); } static ssize_t @@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return snprintf(buf, PAGE_SIZE, "\n"); return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", @@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) dma_addr_t stats_dma; struct fc_host_statistics *pfc_host_stat; - pfc_host_stat = &ha->fc_host_stat; + pfc_host_stat = &vha->fc_host_stat; memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); if (test_bit(UNLOADING, &vha->dpc_flags)) @@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat->dumped_frames = stats->dumped_frames; pfc_host_stat->nos_count = stats->nos_rcvd; } - pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20; - pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20; + pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; + pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; done_free: dma_pool_free(ha->s_dma_pool, stats, stats_dma); @@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) fc_host_supported_speeds(vha->host) = fc_host_supported_speeds(base_vha->host); + qlt_vport_create(vha, ha); qla24xx_vport_disable(fc_vport, disable); if (ha->flags.cpu_affinity_enabled) { @@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); - fc_host_supported_classes(vha->host) = FC_COS_CLASS3; + fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ? 
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; if (IS_CNA_CAPABLE(ha)) speed = FC_PORTSPEED_10GBIT; + else if (IS_QLA2031(ha)) + speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT; else if (IS_QLA25XX(ha)) speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index bc3cc6d91117..c68883806c54 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) /* Initialize all required fields of fcport */ fcport->vha = vha; - fcport->vp_idx = vha->vp_idx; fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_els.port_id[0]; fcport->d_id.b.area = @@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) /* Initialize all required fields of fcport */ fcport->vha = vha; - fcport->vp_idx = vha->vp_idx; fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; @@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, int rval = 0; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) goto done_set_internal; new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); @@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, uint16_t new_config[4]; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) goto done_reset_internal; memset(new_config, 0 , sizeof(new_config)); @@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; if ((ha->current_topology == ISP_CFG_F || - (atomic_read(&vha->loop_state) == LOOP_DOWN) || - ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && + ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { @@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) if (rval) return rval; + /* Set the isp82xx_no_md_cap not to capture minidump */ + ha->flags.isp82xx_no_md_cap = 1; + sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, ha->optrom_buffer, ha->optrom_region_size); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 62324a1d5573..fdee5611f3e2 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,27 +11,31 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa | - * | Mailbox commands | 0x113e | 0x112c-0x112e | + * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa | + * | Mailbox commands | 0x1140 | 0x111a-0x111b | + * | | | 0x112c-0x112e | * | | | 0x113a | * | Device Discovery | 0x2086 | 0x2020-0x2022 | * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | * | | | 0x302d-0x302e | - * | DPC Thread | 0x401c | | - * | Async Events | 0x505d | 0x502b-0x502f | + * | DPC Thread | 0x401c | 0x4002,0x4013 | + * | Async 
Events | 0x505f | 0x502b-0x502f | * | | | 0x5047,0x5052 | - * | Timer Routines | 0x6011 | 0x600e-0x600f | + * | Timer Routines | 0x6011 | | * | User Space Interactions | 0x709f | 0x7018,0x702e, | * | | | 0x7039,0x7045, | * | | | 0x7073-0x7075, | * | | | 0x708c | * | Task Management | 0x803c | 0x8025-0x8026 | * | | | 0x800b,0x8039 | - * | AER/EEH | 0x900f | | + * | AER/EEH | 0x9011 | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb054 | 0xb053 | + * | ISP82XX Specific | 0xb054 | 0xb024 | * | MultiQ | 0xc00c | | * | Misc | 0xd010 | | + * | Target Mode | 0xe06f | | + * | Target Mode Management | 0xf071 | | + * | Target Mode Task Management | 0x1000b | | * ---------------------------------------------------------------------- */ @@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) } static inline void * +qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, + uint32_t **last_chain) +{ + struct qla2xxx_mqueue_chain *q; + struct qla2xxx_mqueue_header *qh; + uint32_t num_queues; + int que; + struct { + int length; + void *ring; + } aq, *aqp; + + if (!ha->tgt.atio_q_length) + return ptr; + + num_queues = 1; + aqp = &aq; + aqp->length = ha->tgt.atio_q_length; + aqp->ring = ha->tgt.atio_ring; + + for (que = 0; que < num_queues; que++) { + /* aqp = ha->atio_q_map[que]; */ + q = ptr; + *last_chain = &q->type; + q->type = __constant_htonl(DUMP_CHAIN_QUEUE); + q->chain_size = htonl( + sizeof(struct qla2xxx_mqueue_chain) + + sizeof(struct qla2xxx_mqueue_header) + + (aqp->length * sizeof(request_t))); + ptr += sizeof(struct qla2xxx_mqueue_chain); + + /* Add header. */ + qh = ptr; + qh->queue = __constant_htonl(TYPE_ATIO_QUEUE); + qh->number = htonl(que); + qh->size = htonl(aqp->length * sizeof(request_t)); + ptr += sizeof(struct qla2xxx_mqueue_header); + + /* Add data. */ + memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t)); + + ptr += aqp->length * sizeof(request_t); + } + + return ptr; +} + +static inline void * qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { struct qla2xxx_mqueue_chain *q; @@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) struct qla24xx_fw_dump *fw; uint32_t ext_mem_cnt; void *nxt; + void *nxt_chain; + uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); if (IS_QLA82XX(ha)) @@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) qla24xx_copy_eft(ha, nxt); + nxt_chain = (void *)ha->fw_dump + ha->chain_offset; + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); + if (last_chain) { + ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); + *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); + } + + /* Adjust valid length. */ + ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); + qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); @@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); @@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) /* Chain entries -- started with MQ. 
*/ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); @@ -2218,6 +2284,7 @@ copy_queue: /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); + nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 2157bdf1569a..f278df8cce0f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header { uint32_t queue; #define TYPE_REQUEST_QUEUE 0x1 #define TYPE_RESPONSE_QUEUE 0x2 +#define TYPE_ATIO_QUEUE 0x3 uint32_t number; uint32_t size; }; @@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); #define ql_dbg_misc 0x00010000 /* For dumping everything that is not * not covered by upper categories */ +#define ql_dbg_verbose 0x00008000 /* More verbosity for each level + * This is to be used with other levels where + * more verbosity is required. It might not + * be applicable to all the levels. + */ +#define ql_dbg_tgt 0x00004000 /* Target mode */ +#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ +#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a2443031dbe7..39007f53aec0 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -186,6 +186,7 @@ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ +#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ struct req_que; @@ -1234,11 +1235,27 @@ typedef struct { * ISP queue - response queue entry definition. */ typedef struct { - uint8_t data[60]; + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + uint8_t data[52]; uint32_t signature; #define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ } response_t; +/* + * ISP queue - ATIO queue entry definition. + */ +struct atio { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t data[58]; + uint32_t signature; +#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ +}; + typedef union { uint16_t extended; struct { @@ -1719,11 +1736,13 @@ typedef struct fc_port { struct fc_rport *rport, *drport; u32 supported_classes; - uint16_t vp_idx; uint8_t fc4_type; uint8_t scan_state; } fc_port_t; +#define QLA_FCPORT_SCAN_NONE 0 +#define QLA_FCPORT_SCAN_FOUND 1 + /* * Fibre channel port/lun states. */ @@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = { #define FCF_LOGIN_NEEDED BIT_1 #define FCF_FCP2_DEVICE BIT_2 #define FCF_ASYNC_SENT BIT_3 +#define FCF_CONF_COMP_SUPPORTED BIT_4 /* No loop ID flag. 
*/ #define FC_NO_LOOP_ID 0x1000 @@ -2419,6 +2439,40 @@ struct qlfc_fw { uint32_t len; }; +struct qlt_hw_data { + /* Protected by hw lock */ + uint32_t enable_class_2:1; + uint32_t enable_explicit_conf:1; + uint32_t ini_mode_force_reverse:1; + uint32_t node_name_set:1; + + dma_addr_t atio_dma; /* Physical address. */ + struct atio *atio_ring; /* Base virtual address */ + struct atio *atio_ring_ptr; /* Current address. */ + uint16_t atio_ring_index; /* Current index. */ + uint16_t atio_q_length; + + void *target_lport_ptr; + struct qla_tgt_func_tmpl *tgt_ops; + struct qla_tgt *qla_tgt; + struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS]; + uint16_t current_handle; + + struct qla_tgt_vp_map *tgt_vp_map; + struct mutex tgt_mutex; + struct mutex tgt_host_action_mutex; + + int saved_set; + uint16_t saved_exchange_count; + uint32_t saved_firmware_options_1; + uint32_t saved_firmware_options_2; + uint32_t saved_firmware_options_3; + uint8_t saved_firmware_options[2]; + uint8_t saved_add_firmware_options[2]; + + uint8_t tgt_node_name[WWN_SIZE]; +}; + /* * Qlogic host adapter specific data structure. */ @@ -2460,7 +2514,9 @@ struct qla_hw_data { uint32_t thermal_supported:1; uint32_t isp82xx_reset_hdlr_active:1; uint32_t isp82xx_reset_owner:1; - /* 28 bits */ + uint32_t isp82xx_no_md_cap:1; + uint32_t host_shutting_down:1; + /* 30 bits */ } flags; /* This spinlock is used to protect "io transactions", you must @@ -2804,7 +2860,6 @@ struct qla_hw_data { /* ISP2322: red, green, amber. */ uint16_t zio_mode; uint16_t zio_timer; - struct fc_host_statistics fc_host_stat; struct qla_msix_entry *msix_entries; @@ -2817,7 +2872,6 @@ struct qla_hw_data { int cur_vport_count; struct qla_chip_state_84xx *cs84xx; - struct qla_statistics qla_stats; struct isp_operations *isp_ops; struct workqueue_struct *wq; struct qlfc_fw fw_buf; @@ -2863,6 +2917,8 @@ struct qla_hw_data { dma_addr_t md_tmplt_hdr_dma; void *md_dump; uint32_t md_dump_size; + + struct qlt_hw_data tgt; }; /* @@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host { #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ +#define SCR_PENDING 21 /* SCR in target mode */ uint32_t device_flags; #define SWITCH_FOUND BIT_0 @@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host { struct req_que *req; int fw_heartbeat_counter; int seconds_since_last_heartbeat; + struct fc_host_statistics fc_host_stat; + struct qla_statistics qla_stats; atomic_t vref_count; } scsi_qla_host_t; +#define SET_VP_IDX 1 +#define SET_AL_PA 2 +#define RESET_VP_IDX 3 +#define RESET_AL_PA 4 +struct qla_tgt_vp_map { + uint8_t idx; + scsi_qla_host_t *vha; +}; + /* * Macros to help code, maintain, etc. */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 9f065804bd12..9eacd2df111b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); /* * Global Function Prototypes in qla_iocb.c source file. 
*/ + extern uint16_t qla2x00_calc_iocbs_32(uint16_t); extern uint16_t qla2x00_calc_iocbs_64(uint16_t); extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); @@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); extern int qla24xx_dif_start_scsi(srb_t *); +extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); +extern int qla2x00_issue_marker(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_mbx.c source file. @@ -239,6 +242,9 @@ extern int qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); extern int +qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *); + +extern int qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); extern int @@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); extern void qla2x00_free_irqs(scsi_qla_host_t *); extern int qla2x00_get_data_rate(scsi_qla_host_t *); +extern char *qla2x00_get_link_speed_str(struct qla_hw_data *); + /* * Global Function Prototypes in qla_sup.c source file. */ @@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *); extern void qla2x00_sp_timeout(unsigned long); extern void qla2x00_bsg_job_done(void *, void *, int); extern void qla2x00_bsg_sp_free(void *, void *); +extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); /* Interrupt related */ extern irqreturn_t qla82xx_intr_handler(int, void *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 3128f80441f5..05260d25fe46 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -5,6 +5,7 @@ * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include "qla_target.h" static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); @@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha) ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; - ct_req->req.rff_id.fc4_feature = BIT_1; + qlt_rff_id(vha, ct_req); + ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ /* Execute MS IOCB */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b9465643396b..ca5084743135 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -17,6 +17,9 @@ #include <asm/prom.h> #endif +#include <target/target_core_base.h> +#include "qla_target.h" + /* * QLogic ISP2x00 Hardware Support Function Prototypes. */ @@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; } } - rval = qla2x00_init_rings(vha); + + if (qla_ini_mode_enabled(vha)) + rval = qla2x00_init_rings(vha); + ha->flags.chip_reset_done = 1; if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { @@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mq_size += ha->max_rsp_queues * (rsp->length * sizeof(response_t)); } + if (ha->tgt.atio_q_length) + mq_size += ha->tgt.atio_q_length * sizeof(request_t); /* Allocate memory for Fibre Channel Event Buffer. 
*/ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) goto try_eft; @@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); + /* Setup ATIO queue dma pointers for target mode */ + icb->atio_q_inpointer = __constant_cpu_to_le16(0); + icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); + icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); + icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); + if (ha->mqenable || IS_QLA83XX(ha)) { icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); icb->rid = __constant_cpu_to_le16(rid); @@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); } + qlt_24xx_config_rings(vha, reg); + /* PCI posting */ RD_REG_DWORD(&ioreg->hccr); } @@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_unlock(&ha->vport_slock); + ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; + ha->tgt.atio_ring_index = 0; + /* Initialize ATIO queue entries */ + qlt_init_atio_q_entries(vha); + ha->isp_ops->config_rings(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) vha->d_id.b.area = area; vha->d_id.b.al_pa = al_pa; + spin_lock(&ha->vport_slock); + qlt_update_vp_map(vha, SET_AL_PA); + spin_unlock(&ha->vport_slock); + if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, "Topology - %s, Host Loop address 0x%x.\n", @@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { /* Reset NVRAM data. */ ql_log(ql_log_warn, vha, 0x0064, - "Inconisistent NVRAM " + "Inconsistent NVRAM " "detected: checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); ql_log(ql_log_warn, vha, 0x0065, @@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) if (IS_QLA23XX(ha)) { nv->firmware_options[0] |= BIT_2; nv->firmware_options[0] &= ~BIT_3; - nv->firmware_options[0] &= ~BIT_6; + nv->special_options[0] &= ~BIT_6; nv->add_firmware_options[1] |= BIT_5 | BIT_4; if (IS_QLA2300(ha)) { @@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; + scsi_qla_host_t *vha = fcport->vha; unsigned long flags; spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) + if (rport) { fc_remote_port_delete(rport); + /* + * Release the target mode FC NEXUS in qla_target.c code + * if target mode is enabled. + */ + qlt_fc_port_deleted(vha, fcport); + } } /** @@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) /* Setup fcport template structure.
*/ fcport->vha = vha; - fcport->vp_idx = vha->vp_idx; fcport->port_type = FCT_UNKNOWN; fcport->loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->scan_state = QLA_FCPORT_SCAN_NONE; return fcport; } @@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; - new_fcport->vp_idx = vha->vp_idx; rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x201a, @@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (!found) { /* New device, add to fcports list. */ - if (vha->vp_idx) { - new_fcport->vha = vha; - new_fcport->vp_idx = vha->vp_idx; - } list_add_tail(&new_fcport->list, &vha->vp_fcports); /* Allocate a new replacement fcport. */ @@ -2800,8 +2827,6 @@ cleanup_allocation: static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { -#define LS_UNKNOWN 2 - static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; char *link_speed; int rval; uint16_t mb[4]; @@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[6], fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); } else { - link_speed = link_speeds[LS_UNKNOWN]; - if (fcport->fp_speed < 5) - link_speed = link_speeds[fcport->fp_speed]; - else if (fcport->fp_speed == 0x13) - link_speed = link_speeds[5]; + link_speed = qla2x00_get_link_speed_str(ha); ql_dbg(ql_dbg_disc, vha, 0x2005, "iIDMA adjusted to %s GB/s " "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, @@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port.\n"); return; } + /* + * Create target mode FC NEXUS in qla_target.c if target mode is + * enabled. + */ + qlt_fc_port_added(vha, fcport); + spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); @@ -2921,7 +2948,7 @@ static int qla2x00_configure_fabric(scsi_qla_host_t *vha) { int rval; - fc_port_t *fcport, *fcptemp; + fc_port_t *fcport; uint16_t next_loopid; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; @@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) 0xfc, mb, BIT_1|BIT_0); if (rval != QLA_SUCCESS) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - return rval; + break; } if (mb[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_disc, vha, 0x2042, @@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) } } -#define QLA_FCPORT_SCAN 1 -#define QLA_FCPORT_FOUND 2 - - list_for_each_entry(fcport, &vha->vp_fcports, list) { - fcport->scan_state = QLA_FCPORT_SCAN; - } - rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); if (rval != QLA_SUCCESS) break; - /* - * Logout all previous fabric devices marked lost, except - * FCP2 devices. - */ + /* Add new ports to existing port list */ + list_splice_tail_init(&new_fcports, &vha->vp_fcports); + + /* Starting free loop ID.
*/ + next_loopid = ha->min_external_loopid; + list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; @@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; - if (fcport->scan_state == QLA_FCPORT_SCAN && + /* Logout lost/gone fabric devices (non-FCP2) */ + if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice, 0); @@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - fcport->loop_id = FC_NO_LOOP_ID; } - } - } - - /* Starting free loop ID. */ - next_loopid = ha->min_external_loopid; - - /* - * Scan through our port list and login entries that need to be - * logged in. - */ - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) continue; - - if (fcport->loop_id == FC_NO_LOOP_ID) { - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id( - base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); - } - - /* Exit if out of loop IDs. */ - if (rval != QLA_SUCCESS) { - break; - } - - /* - * Login and add the new devices to our port list. - */ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - if (atomic_read(&vha->loop_down_timer) || - test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) - break; - - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; + fcport->scan_state = QLA_FCPORT_SCAN_NONE; + + /* Login fabric devices that need a login */ + if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 && + atomic_read(&vha->loop_down_timer) == 0) { + if (fcport->loop_id == FC_NO_LOOP_ID) { + fcport->loop_id = next_loopid; + rval = qla2x00_find_new_loop_id( + base_vha, fcport); + if (rval != QLA_SUCCESS) { + /* Ran out of IDs to use */ + continue; + } + } } /* Login and update database */ qla2x00_fabric_dev_login(vha, fcport, &next_loopid); - - if (vha->vp_idx) { - fcport->vha = vha; - fcport->vp_idx = vha->vp_idx; - } - list_move_tail(&fcport->list, &vha->vp_fcports); } } while (0); - /* Free all new device structures not processed. 
*/ - list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { - list_del(&fcport->list); - kfree(fcport); - } - if (rval) { ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); @@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, WWN_SIZE)) continue; - fcport->scan_state = QLA_FCPORT_FOUND; + fcport->scan_state = QLA_FCPORT_SCAN_FOUND; found++; @@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, if (mb[10] & BIT_1) fcport->supported_classes |= FC_COS_CLASS3; + if (IS_FWI2_CAPABLE(ha)) { + if (mb[10] & BIT_7) + fcport->flags |= + FCF_CONF_COMP_SUPPORTED; + } + rval = QLA_SUCCESS; break; } else if (mb[0] == MBS_LOOP_ID_USED) { @@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - ha->qla_stats.total_isp_aborts++; + vha->qla_stats.total_isp_aborts++; ql_log(ql_log_info, vha, 0x00af, "Performing ISP error recovery - ha=%p.\n", ha); @@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; + unsigned long flags; /* If firmware needs to be loaded */ if (qla2x00_isp_firmware(vha)) { @@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); vha->flags.online = 1; + + /* + * Process any ATIO queue entries that came in + * while we weren't online. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (qla_tgt_mode_enabled(vha)) + qlt_24xx_process_atio_queue(vha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + /* Wait at most MAX_TARGET RSCNs for a stable link. */ wait_time = 256; do { @@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. */ ql_log(ql_log_warn, vha, 0x006b, - "Inconisistent NVRAM detected: checksum=0x%x id=%c " + "Inconsistent NVRAM detected: checksum=0x%x id=%c " "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); ql_log(ql_log_warn, vha, 0x006c, "Falling back to functioning (yet invalid -- WWPN) " @@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) rval = 1; } + if (!qla_ini_mode_enabled(vha)) { + /* Don't enable full login after initial LIP */ + nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); + /* Don't enable LIP full login for initiator */ + nv->host_p &= __constant_cpu_to_le32(~BIT_10); + } + + qlt_24xx_config_nvram_stage1(vha, nv); + /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); @@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), "QLA2462"); - /* Use alternate WWN? */ + qlt_24xx_config_nvram_stage2(vha, icb); + if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { + /* Use alternate WWN? */ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } @@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. 
*/ ql_log(ql_log_info, vha, 0x0073, - "Inconisistent NVRAM detected: checksum=0x%x id=%c " + "Inconsistent NVRAM detected: checksum=0x%x id=%c " "version=0x%x.\n", chksum, nv->id[0], le16_to_cpu(nv->nvram_version)); ql_log(ql_log_info, vha, 0x0074, diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index eac950924497..70dbf53d9e0f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -5,6 +5,7 @@ * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include "qla_target.h" #include <linux/blkdev.h> #include <linux/delay.h> @@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp) { uint16_t cflags; struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; cflags = 0; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cflags = CF_WRITE; - sp->fcport->vha->hw->qla_stats.output_bytes += - scsi_bufflen(cmd); + vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cflags = CF_READ; - sp->fcport->vha->hw->qla_stats.input_bytes += - scsi_bufflen(cmd); + vha->qla_stats.input_bytes += scsi_bufflen(cmd); } return (cflags); } @@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp) else req->cnt = req->length - (req->ring_index - cnt); + /* If still no head room then bail out */ + if (req->cnt < (req_cnt + 2)) + goto queuing_error; } - if (req->cnt < (req_cnt + 2)) - goto queuing_error; /* Build command packet */ req->current_outstanding_cmd = handle; @@ -470,7 +471,7 @@ queuing_error: /** * qla2x00_start_iocbs() - Execute the IOCB command */ -static void +void qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; @@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, return (ret); } +/* + * qla2x00_issue_marker + * + * Issue marker + * Caller CAN have hardware lock held as specified by ha_locked parameter. + * Might release it, then reacquire. + */ +int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) +{ + if (ha_locked) { + if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, + MK_SYNC_ALL) != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + } else { + if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, + MK_SYNC_ALL) != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + + return QLA_SUCCESS; +} + /** * qla24xx_calc_iocbs() - Determine number of Command Type 3 and * Continuation Type 1 IOCBs to allocate.
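The qla2x00_issue_marker() helper added above follows the kernel's double-underscore locking convention: __qla2x00_marker() expects the hardware lock to be held, the plain variant takes it itself, and ha_locked selects between them. A minimal pthread sketch of the same pattern, with invented names (not the driver's API):

#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* __send_marker: caller already holds hw_lock. */
static int __send_marker(void)
{
	/* a real implementation would ring the hardware doorbell here */
	return 0;
}

/* send_marker: lock-taking wrapper around __send_marker(). */
static int send_marker(void)
{
	int rc;

	pthread_mutex_lock(&hw_lock);
	rc = __send_marker();
	pthread_mutex_unlock(&hw_lock);
	return rc;
}

/* issue_marker: dispatch on whether the caller holds the lock,
 * as qla2x00_issue_marker() does with its ha_locked parameter. */
static int issue_marker(int ha_locked)
{
	return ha_locked ? __send_marker() : send_marker();
}

int main(void)
{
	return issue_marker(0);
}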
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->control_flags = __constant_cpu_to_le16(CF_WRITE_DATA); - ha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = __constant_cpu_to_le16(CF_READ_DATA); - ha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_bytes += scsi_bufflen(cmd); } cur_seg = scsi_sglist(cmd); @@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_WRITE_DATA); - sp->fcport->vha->hw->qla_stats.output_bytes += - scsi_bufflen(cmd); + vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_READ_DATA); - sp->fcport->vha->hw->qla_stats.input_bytes += - scsi_bufflen(cmd); + vha->qla_stats.input_bytes += scsi_bufflen(cmd); } /* One DSD is available in the Command Type 3 IOCB */ @@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, return QLA_SUCCESS; } - cmd_pkt->vp_index = sp->fcport->vp_idx; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp) else req->cnt = req->length - (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; } - if (req->cnt < (req_cnt + 2)) - goto queuing_error; /* Build command packet. */ req->current_outstanding_cmd = handle; @@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp) cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vp_idx; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); @@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp) else req->cnt = req->length - (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; } - if (req->cnt < (req_cnt + 2)) - goto queuing_error; - status |= QDSS_GOT_Q_SPACE; /* Build header part of command packet (excluding the OPCODE). 
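These hunks fold the (req_cnt + 2) bail-out into the branch that re-reads the hardware out-pointer: if the cached count already shows room, the second comparison is redundant and is now skipped. The underlying ring arithmetic, as a stand-alone sketch (ring_len, in and out are illustrative stand-ins for req->length, req->ring_index and the out-pointer read from the chip):

#include <stdint.h>
#include <assert.h>

/* Free slots between producer 'in' and consumer 'out' on a ring of
 * ring_len entries, accounting for wrap-around. */
static uint16_t ring_free_slots(uint16_t ring_len, uint16_t in, uint16_t out)
{
	if (in < out)
		return out - in;
	return ring_len - (in - out);
}

/* The driver keeps a small cushion: refuse the request unless
 * need + 2 slots remain, matching the (req_cnt + 2) checks above. */
static int have_headroom(uint16_t ring_len, uint16_t in, uint16_t out,
			 uint16_t need)
{
	return ring_free_slots(ring_len, in, out) >= need + 2;
}

int main(void)
{
	assert(ring_free_slots(128, 10, 10) == 128);	/* empty ring */
	assert(ring_free_slots(128, 120, 8) == 16);	/* wrapped */
	assert(have_headroom(128, 120, 8, 4));
	return 0;
}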
*/ @@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vp_idx; + logio->vp_index = sp->fcport->vha->vp_idx; } static void @@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); + mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); } static void @@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; - logio->vp_index = sp->fcport->vp_idx; + logio->vp_index = sp->fcport->vha->vp_idx; } static void @@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); - mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); + mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); /* Implicit: mbx->mbx10 = 0. */ } @@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); - logio->vp_index = sp->fcport->vp_idx; + logio->vp_index = sp->fcport->vha->vp_idx; } static void @@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); - mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); + mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); } static void @@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) tsk->port_id[0] = fcport->d_id.b.al_pa; tsk->port_id[1] = fcport->d_id.b.area; tsk->port_id[2] = fcport->d_id.b.domain; - tsk->vp_index = fcport->vp_idx; + tsk->vp_index = fcport->vha->vp_idx; if (flags == TCF_LUN_RESET) { int_to_scsilun(lun, &tsk->lun); @@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); - els_iocb->vp_index = sp->fcport->vp_idx; + els_iocb->vp_index = sp->fcport->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); @@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->handle = sp->handle; ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - ct_iocb->vp_index = sp->fcport->vp_idx; + ct_iocb->vp_index = sp->fcport->vha->vp_idx; ct_iocb->comp_status = __constant_cpu_to_le16(0); ct_iocb->cmd_dsd_count = @@ -2343,11 +2364,10 @@ sufficient_dsds: else req->cnt = req->length - (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; } - if (req->cnt < (req_cnt + 2)) - goto queuing_error; - ctx = sp->u.scmd.ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); if (!ctx) { @@ -2362,7 +2382,7 @@ sufficient_dsds: if (!ctx->fcp_cmnd) { ql_log(ql_log_fatal, vha, 0x3011, "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); - goto 
queuing_error_fcp_cmnd; + goto queuing_error; } /* Initialize the DSD list and dma handle */ @@ -2400,7 +2420,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vp_idx; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; /* Build IOCB segments */ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) @@ -2489,7 +2509,7 @@ sufficient_dsds: cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; - cmd_pkt->vp_index = sp->fcport->vp_idx; + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ce42288049b5..6f67a9d4998b 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -5,6 +5,7 @@ * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include "qla_target.h" #include <linux/delay.h> #include <linux/slab.h> @@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) "IDC failed to post ACK.\n"); } +#define LS_UNKNOWN 2 +char * +qla2x00_get_link_speed_str(struct qla_hw_data *ha) +{ + static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"}; + char *link_speed; + int fw_speed = ha->link_data_rate; + + if (IS_QLA2100(ha) || IS_QLA2200(ha)) + link_speed = link_speeds[0]; + else if (fw_speed == 0x13) + link_speed = link_speeds[6]; + else { + link_speed = link_speeds[LS_UNKNOWN]; + if (fw_speed < 6) + link_speed = + link_speeds[fw_speed]; + } + + return link_speed; +} + /** * qla2x00_async_event() - Process aynchronous events. 
* @ha: SCSI driver HA context @@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) void qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) { -#define LS_UNKNOWN 2 - static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" }; - char *link_speed; uint16_t handle_cnt; uint16_t cnt, mbx; uint32_t handles[5]; @@ -454,8 +474,8 @@ skip_rio: case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ ql_dbg(ql_dbg_async, vha, 0x5008, "Asynchronous WAKEUP_THRES.\n"); - break; + break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -479,20 +499,14 @@ skip_rio: break; case MBA_LOOP_UP: /* Loop Up Event */ - if (IS_QLA2100(ha) || IS_QLA2200(ha)) { - link_speed = link_speeds[0]; + if (IS_QLA2100(ha) || IS_QLA2200(ha)) ha->link_data_rate = PORT_SPEED_1GB; - } else { - link_speed = link_speeds[LS_UNKNOWN]; - if (mb[1] < 6) - link_speed = link_speeds[mb[1]]; - else if (mb[1] == 0x13) - link_speed = link_speeds[6]; + else ha->link_data_rate = mb[1]; - } ql_dbg(ql_dbg_async, vha, 0x500a, - "LOOP UP detected (%s Gbps).\n", link_speed); + "LOOP UP detected (%s Gbps).\n", + qla2x00_get_link_speed_str(ha)); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); @@ -638,6 +652,8 @@ skip_rio: ql_dbg(ql_dbg_async, vha, 0x5010, "Port unavailable %04x %04x %04x.\n", mb[1], mb[2], mb[3]); + ql_log(ql_log_warn, vha, 0x505e, + "Link is offline.\n"); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); @@ -670,12 +686,17 @@ skip_rio: ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); + + qlt_async_event(mb[0], vha, mb); break; } ql_dbg(ql_dbg_async, vha, 0x5012, "Port database changed %04x %04x %04x.\n", mb[1], mb[2], mb[3]); + ql_log(ql_log_warn, vha, 0x505f, + "Link is operational (%s Gbps).\n", + qla2x00_get_link_speed_str(ha)); /* * Mark all devices as missing so we will login again. 
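qla2x00_get_link_speed_str(), added above, centralizes the table lookups previously open-coded in the loop-up handler. A user-space rendering of the same mapping (the ISP2100/2200 special case, which always reports 1 Gbps, is omitted here):

#include <stdio.h>

/* Firmware rate codes 0-5 index the table directly, 0x13 is the
 * 10-gigabit encoding, and anything else reads as unknown. */
static const char *link_speed_str(int fw_speed)
{
	static const char *speeds[] = { "1", "2", "?", "4", "8", "16", "10" };

	if (fw_speed == 0x13)
		return speeds[6];
	if (fw_speed >= 0 && fw_speed < 6)
		return speeds[fw_speed];
	return speeds[2];	/* LS_UNKNOWN */
}

int main(void)
{
	printf("%s Gbps\n", link_speed_str(0x13));	/* 10 */
	printf("%s Gbps\n", link_speed_str(3));		/* 4 */
	printf("%s Gbps\n", link_speed_str(9));		/* ? */
	return 0;
}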
@@ -684,8 +705,13 @@ skip_rio: qla2x00_mark_all_devices_lost(vha, 1); + if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) + set_bit(SCR_PENDING, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + + qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ @@ -807,6 +833,8 @@ skip_rio: mb[0], mb[1], mb[2], mb[3]); } + qlt_async_event(mb[0], vha, mb); + if (!vha->vp_idx && ha->num_vhosts) qla2x00_alert_all_vps(rsp, mb); } @@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, } else if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; + if (iop[0] & BIT_7) + fcport->flags |= FCF_CONF_COMP_SUPPORTED; + if (logio->io_parameter[7] || logio->io_parameter[8]) fcport->supported_classes |= FC_COS_CLASS2; if (logio->io_parameter[9] || logio->io_parameter[10]) @@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (pkt->entry_status != 0) { qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); + + (void)qlt_24xx_process_response_error(vha, pkt); + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; @@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; + case ABTS_RECV_24XX: + /* ensure that the ATIO queue is empty */ + qlt_24xx_process_atio_queue(vha); + case ABTS_RESP_24XX: + case CTIO_TYPE7: + case NOTIFY_ACK_TYPE: + qlt_response_pkt_all_vps(vha, (response_t *)pkt); + break; case MARKER_TYPE: /* Do nothing in this case, this check is to prevent it * from falling into default case @@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id) case 0x14: qla24xx_process_response_queue(vha, rsp); break; + case 0x1C: /* ATIO queue updated */ + qlt_24xx_process_atio_queue(vha); + break; + case 0x1D: /* ATIO and response queues updated */ + qlt_24xx_process_atio_queue(vha); + qla24xx_process_response_queue(vha, rsp); + break; default: ql_dbg(ql_dbg_async, vha, 0x504f, "Unrecognized interrupt type (%d).\n", stat * 0xff); @@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id) case 0x14: qla24xx_process_response_queue(vha, rsp); break; + case 0x1C: /* ATIO queue updated */ + qlt_24xx_process_atio_queue(vha); + break; + case 0x1D: /* ATIO and response queues updated */ + qlt_24xx_process_atio_queue(vha); + qla24xx_process_response_queue(vha, rsp); + break; default: ql_dbg(ql_dbg_async, vha, 0x5051, "Unrecognized interrupt type (%d).\n", stat & 0xff); @@ -2564,7 +2620,15 @@ void qla2x00_free_irqs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - struct rsp_que *rsp = ha->rsp_q_map[0]; + struct rsp_que *rsp; + + /* + * We need to check that ha->rsp_q_map is valid in case we are called + * from a probe failure context. + */ + if (!ha->rsp_q_map || !ha->rsp_q_map[0]) + return; + rsp = ha->rsp_q_map[0]; if (ha->flags.msix_enabled) qla24xx_disable_msix(ha); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index b4a23394a7bd..d5ce92c0a8fc 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -5,6 +5,7 @@ * See LICENSE.qla2xxx for copyright and licensing details. 
*/ #include "qla_def.h" +#include "qla_target.h" #include <linux/delay.h> #include <linux/gfp.h> @@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ictrl = RD_REG_WORD(®->isp.ictrl); } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, - "MBX Command timeout for cmd %x.\n", command); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a, - "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b, - "mb[0] = 0x%x.\n", mb0); + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* @@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101c, - "Mailbox cmd timeout occured, cmd=0x%x, " + "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); @@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101e, - "Mailbox cmd timeout occured, cmd=0x%x, " + "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); @@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, + "Entered %s.\n", __func__); if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; @@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, ql_dbg(ql_dbg_mbx, vha, 0x1023, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, + "Done %s.\n", __func__); } return rval; @@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_EXECUTE_FIRMWARE; mcp->out_mb = MBX_0; @@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { if (IS_FWI2_CAPABLE(ha)) { - ql_dbg(ql_dbg_mbx, vha, 0x1027, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027, "Done exchanges=%x.\n", mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, + "Done %s.\n", __func__); } } @@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; mcp->out_mb = MBX_0; @@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->fw_attributes_h = mcp->mb[15]; ha->fw_attributes_ext[0] = mcp->mb[16]; ha->fw_attributes_ext[1] = mcp->mb[17]; - ql_dbg(ql_dbg_mbx, vha, 0x1139, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[15], mcp->mb[6]); } else - ql_dbg(ql_dbg_mbx, vha, 0x112f, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 
"%s: FwAttributes [Upper] invalid, MB6:%04x\n", __func__, mcp->mb[6]); } @@ -576,7 +579,8 @@ failed: ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, + "Done %s.\n", __func__); } return rval; } @@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; mcp->out_mb = MBX_0; @@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) fwopts[2] = mcp->mb[2]; fwopts[3] = mcp->mb[3]; - ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, + "Done %s.\n", __func__); } return rval; @@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; mcp->mb[1] = fwopts[1]; @@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, + "Done %s.\n", __func__); } return rval; @@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; mcp->mb[1] = 0xAAAA; @@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, + "Done %s.\n", __func__); } return rval; @@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_VERIFY_CHECKSUM; mcp->out_mb = MBX_0; @@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, + "Done %s.\n", __func__); } return rval; @@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_IOCB_COMMAND_A64; mcp->mb[1] = 0; @@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, /* Mask reserved bits. */ sts_entry->entry_status &= IS_FWI2_CAPABLE(vha->hw) ? 
RF_MASK_24XX : RF_MASK; - ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, + "Done %s.\n", __func__); } return rval; @@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp) struct req_que *req = vha->req; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, + "Entered %s.\n", __func__); spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { @@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp) if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, + "Done %s.\n", __func__); } return rval; @@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) l = l; vha = fcport->vha; - ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, + "Entered %s.\n", __func__); req = vha->hw->req_q_map[0]; rsp = req->rsp; @@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, + "Failed=%x.\n", rval); } /* Issue marker IOCB. */ @@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) ql_dbg(ql_dbg_mbx, vha, 0x1040, "Failed to issue marker IOCB (%x).\n", rval2); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, + "Done %s.\n", __func__); } return rval; @@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) vha = fcport->vha; - ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, + "Entered %s.\n", __func__); req = vha->hw->req_q_map[0]; rsp = req->rsp; @@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) ql_dbg(ql_dbg_mbx, vha, 0x1044, "Failed to issue marker IOCB (%x).\n", rval2); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, + "Done %s.\n", __func__); } return rval; @@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; mcp->mb[9] = vha->vp_idx; @@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, + "Done %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; @@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RETRY_COUNT; mcp->out_mb = MBX_0; @@ -1138,7 +1162,7 @@ 
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, *tov = ratov; } - ql_dbg(ql_dbg_mbx, vha, 0x104b, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); } @@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, + "Entered %s.\n", __func__); if (IS_QLA82XX(ha) && ql2xdbwr) qla82xx_wr_32(ha, ha->nxdb_wr_ptr, @@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla2x00_get_node_name_list + * Issue get node name list mailbox command, kmalloc() + * and return the resulting list. Caller must kfree() it! + * + * Input: + * ha = adapter state pointer. + * out_data = resulting list + * out_len = length of the resulting list + * + * Returns: + * qla2x00 local function return status code. + * + * Context: + * Kernel context. + */ +int +qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_port_24xx_data *list = NULL; + void *pmap; + mbx_cmd_t mc; + dma_addr_t pmap_dma; + ulong dma_size; + int rval, left; + + left = 1; + while (left > 0) { + dma_size = left * sizeof(*list); + pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size, + &pmap_dma, GFP_KERNEL); + if (!pmap) { + ql_log(ql_log_warn, vha, 0x113f, + "%s(%ld): DMA Alloc failed of %ld\n", + __func__, vha->host_no, dma_size); + rval = QLA_MEMORY_ALLOC_FAILED; + goto out; + } + + mc.mb[0] = MBC_PORT_NODE_NAME_LIST; + mc.mb[1] = BIT_1 | BIT_3; + mc.mb[2] = MSW(pmap_dma); + mc.mb[3] = LSW(pmap_dma); + mc.mb[6] = MSW(MSD(pmap_dma)); + mc.mb[7] = LSW(MSD(pmap_dma)); + mc.mb[8] = dma_size; + mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8; + mc.in_mb = MBX_0|MBX_1; + mc.tov = 30; + mc.flags = MBX_DMA_IN; + + rval = qla2x00_mailbox_command(vha, &mc); + if (rval != QLA_SUCCESS) { + if ((mc.mb[0] == MBS_COMMAND_ERROR) && + (mc.mb[1] == 0xA)) { + left += le16_to_cpu(mc.mb[2]) / + sizeof(struct qla_port_24xx_data); + goto restart; + } + goto out_free; + } + + left = 0; + + list = kzalloc(dma_size, GFP_KERNEL); + if (!list) { + ql_log(ql_log_warn, vha, 0x1140, + "%s(%ld): failed to allocate node names list " + "structure.\n", __func__, vha->host_no); + rval = QLA_MEMORY_ALLOC_FAILED; + goto out_free; + } + + memcpy(list, pmap, dma_size); +restart: + dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); } + *out_data = list; + *out_len = dma_size; + +out: + return rval; + +out_free: + dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); return rval; } @@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) dma_addr_t pd_dma; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, + "Entered %s.\n", __func__); pd24 = NULL; pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); @@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; + + /* Passback COS information. 
*/ + fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? + FC_COS_CLASS2 : FC_COS_CLASS3; + + if (pd24->prli_svc_param_word_3[0] & BIT_7) + fcport->flags |= FCF_CONF_COMP_SUPPORTED; } else { uint64_t zero = 0; @@ -1378,7 +1502,8 @@ gpd_error_out: "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, + "Done %s.\n", __func__); } return rval; @@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; @@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, + "Done %s.\n", __func__); } return rval; @@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_PORT_NAME; mcp->mb[9] = vha->vp_idx; @@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, name[7] = LSB(mcp->mb[7]); } - ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, + "Done %s.\n", __func__); } return rval; @@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a, + "Entered %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { /* Logout across all FCFs. 
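Nearly every routine in this file fills a mbx_cmd_t the same way: load mb[], declare the registers to write and read back as out_mb/in_mb bitmasks, set a timeout, and hand the descriptor to qla2x00_mailbox_command(). A toy dispatcher showing the shape of that contract (the registers are faked with an array; MBX_n and the struct layout are simplified stand-ins, not the driver's definitions):

#include <stdint.h>

#define MBX_0	(1u << 0)
#define MBX_1	(1u << 1)
#define MBX_2	(1u << 2)
#define NUM_MBX	32

struct mbx_cmd {
	uint16_t mb[NUM_MBX];	/* staged mailbox register values */
	uint32_t out_mb;	/* which registers to write */
	uint32_t in_mb;		/* which registers to read back */
	unsigned int tov;	/* completion timeout, seconds */
};

static uint16_t chip_regs[NUM_MBX];	/* stand-in for the hardware */

static int mailbox_command(struct mbx_cmd *mcp)
{
	unsigned int i;

	for (i = 0; i < NUM_MBX; i++) {
		if (mcp->out_mb & (1u << i))
			chip_regs[i] = mcp->mb[i];	/* "write" */
	}
	/* a real dispatcher rings a doorbell here, then sleeps up to
	 * mcp->tov seconds for the completion interrupt */
	for (i = 0; i < NUM_MBX; i++) {
		if (mcp->in_mb & (1u << i))
			mcp->mb[i] = chip_regs[i];	/* "read back" */
	}
	return 0;
}

int main(void)
{
	struct mbx_cmd mc = { .out_mb = MBX_0 | MBX_1,
			      .in_mb = MBX_0 | MBX_2, .tov = 30 };

	mc.mb[0] = 0x0020;	/* e.g. a GET-style opcode */
	mc.mb[1] = 0x0001;
	return mailbox_command(&mc);
}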
*/ @@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, + "Done %s.\n", __func__); } return rval; @@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, + "Entered %s.\n", __func__); - ql_dbg(ql_dbg_mbx, vha, 0x105e, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, "Retry cnt=%d ratov=%d total tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); @@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, + "Done %s.\n", __func__); } return rval; @@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct req_que *req; struct rsp_que *rsp; - ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, + "Entered %s.\n", __func__); if (ha->flags.cpu_affinity_enabled) req = ha->req_q_map[0]; @@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, break; } } else { - ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, + "Done %s.\n", __func__); iop[0] = le32_to_cpu(lg->io_parameter[0]); @@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mb[10] |= BIT_0; /* Class 2. */ if (lg->io_parameter[9] || lg->io_parameter[10]) mb[10] |= BIT_1; /* Class 3. 
*/ + if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7)) + mb[10] |= BIT_7; /* Confirmed Completion + * Allowed + */ } dma_pool_free(ha->s_dma_pool, lg, lg_dma); @@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, + "Done %s.\n", __func__); } return rval; @@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, + "Entered %s.\n", __func__); if (IS_FWI2_CAPABLE(ha)) return qla24xx_login_fabric(vha, fcport->loop_id, @@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, + "Done %s.\n", __func__); } return (rval); @@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct req_que *req; struct rsp_que *rsp; - ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, + "Entered %s.\n", __func__); lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { @@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, le32_to_cpu(lg->io_parameter[1])); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, + "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, lg, lg_dma); @@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; mcp->out_mb = MBX_1|MBX_0; @@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, + "Done %s.\n", __func__); } return rval; @@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? 
BIT_3 : 0; @@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, + "Done %s.\n", __func__); } return rval; @@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, + "Entered %s.\n", __func__); if (id_list == NULL) return QLA_FUNCTION_FAILED; @@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; - ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, + "Done %s.\n", __func__); } return rval; @@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; @@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, ql_dbg(ql_dbg_mbx, vha, 0x107d, "Failed mb[0]=%x.\n", mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x107e, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], @@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) dma_addr_t pmap_dma; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, + "Entered %s.\n", __func__); pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); if (pmap == NULL) { @@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x1081, + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, "mb0/mb1=%x/%X FC/AL position map size (%x).\n", mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, @@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, + "Done %s.\n", __func__); } return rval; @@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, uint32_t *siter, *diter, dwords; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_STATUS; mcp->mb[2] = MSW(stats_dma); @@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, rval = QLA_FUNCTION_FAILED; } else { /* Copy over data -- firmware data is LE. 
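The copy loop that follows this comment walks the DMA buffer one 32-bit word at a time (siter/diter from link_fail_cnt onward) and byte-swaps each word in place, since the firmware writes little-endian data. A portable equivalent with an invented three-counter layout (the real link_statistics struct is larger):

#include <stdint.h>
#include <stddef.h>

struct link_stats {		/* invented subset for illustration */
	uint32_t link_fail_cnt;
	uint32_t loss_sync_cnt;
	uint32_t loss_sig_cnt;
};

/* Assemble host order from explicit little-endian byte positions;
 * a no-op on LE machines, a swap on BE ones. */
static uint32_t le32_to_host(uint32_t v)
{
	const uint8_t *b = (const uint8_t *)&v;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* Mirrors the driver's pointer walk across consecutive counters. */
static void fixup_stats(struct link_stats *s)
{
	uint32_t *w = &s->link_fail_cnt;
	size_t dwords = sizeof(*s) / 4;

	while (dwords--) {
		*w = le32_to_host(*w);
		w++;
	}
}

int main(void)
{
	struct link_stats s = { 0x01000000u, 0, 0 };

	fixup_stats(&s);
	/* on a big-endian host s.link_fail_cnt now reads 0x00000001 */
	return 0;
}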
*/ - ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, + "Done %s.\n", __func__); dwords = offsetof(struct link_statistics, unused1) / 4; siter = diter = &stats->link_fail_cnt; while (dwords--) @@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, mbx_cmd_t *mcp = &mc; uint32_t *siter, *diter, dwords; - ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; mcp->mb[2] = MSW(stats_dma); @@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, "Failed mb[0]=%x.\n", mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { - ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, + "Done %s.\n", __func__); /* Copy over data -- firmware data is LE. */ dwords = sizeof(struct link_statistics) / 4; siter = diter = &stats->link_fail_cnt; @@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = vha->req; - ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, + "Entered %s.\n", __func__); spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { @@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp) abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; - abt->vp_index = fcport->vp_idx; + abt->vp_index = fcport->vha->vp_idx; abt->req_que_no = cpu_to_le16(req->id); @@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp) le16_to_cpu(abt->nport_handle)); rval = QLA_FUNCTION_FAILED; } else { - ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, + "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, abt, abt_dma); @@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, ha = vha->hw; req = vha->req; - ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, + "Entered %s.\n", __func__); if (ha->flags.cpu_affinity_enabled) rsp = ha->rsp_q_map[tag + 1]; @@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; tsk->p.tsk.port_id[1] = fcport->d_id.b.area; tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; - tsk->p.tsk.vp_index = fcport->vp_idx; + tsk->p.tsk.vp_index = fcport->vha->vp_idx; if (type == TCF_LUN_RESET) { int_to_scsilun(l, &tsk->p.tsk.lun); host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, @@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, } else if (le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID) { if (le32_to_cpu(sts->rsp_data_len) < 4) { - ql_dbg(ql_dbg_mbx, vha, 0x1097, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, "Ignoring inconsistent data length -- not enough " "response info (%d).\n", le32_to_cpu(sts->rsp_data_len)); @@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, ql_dbg(ql_dbg_mbx, vha, 0x1099, "Failed to issue marker IOCB (%x).\n", rval2); } else { - ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, + "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); @@ 
-2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha) if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; mcp->out_mb = MBX_0; @@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, + "Done %s.\n", __func__); } return rval; @@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_SERDES_PARAMS; mcp->mb[1] = BIT_0; @@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /*EMPTY*/ - ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, + "Done %s.\n", __func__); } return rval; @@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_STOP_FIRMWARE; mcp->mb[1] = 0; @@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (mcp->mb[0] == MBS_INVALID_COMMAND) rval = QLA_INVALID_COMMAND; } else { - ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, + "Done %s.\n", __func__); } return rval; @@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, + "Done %s.\n", __func__); } return rval; @@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, + "Done %s.\n", __func__); } return rval; @@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, + "Entered %s.\n", __func__); if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) @@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 
"Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, + "Done %s.\n", __func__); if (mb) memcpy(mb, mcp->mb, 8 * sizeof(*mb)); @@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, + "Done %s.\n", __func__); if (wr) *wr = (uint64_t) mcp->mb[5] << 48 | @@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, + "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, + "Done %s.\n", __func__); if (port_speed) *port_speed = mcp->mb[3]; } @@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, + "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, } if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); + ql_dbg(ql_dbg_mbx, vha, 0x10b4, + "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, + "Done %s.\n", __func__); } return rval; @@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, scsi_qla_host_t *vp; unsigned long flags; - ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, + "Entered %s.\n", __func__); if (rptid_entry->entry_status != 0) return; if (rptid_entry->format == 0) { - ql_dbg(ql_dbg_mbx, vha, 0x10b7, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", MSB(le16_to_cpu(rptid_entry->vp_count)), LSB(le16_to_cpu(rptid_entry->vp_count))); - ql_dbg(ql_dbg_mbx, vha, 0x10b8, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); } else if (rptid_entry->format == 1) { vp_idx = LSB(stat); - ql_dbg(ql_dbg_mbx, vha, 0x10b9, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", vp_idx, MSB(stat), rptid_entry->port_id[2], rptid_entry->port_id[1], @@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) /* This can be called by the parent */ - ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered 
%s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, + "Entered %s.\n", __func__); vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); if (!vpmod) { @@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) vpmod->vp_count = 1; vpmod->vp_index1 = vha->vp_idx; vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; + + qlt_modify_vp_config(vha, vpmod); + memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); vpmod->entry_count = 1; @@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) rval = QLA_FUNCTION_FAILED; } else { /* EMPTY */ - ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, + "Done %s.\n", __func__); fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); } dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); @@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) int vp_index = vha->vp_idx; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - ql_dbg(ql_dbg_mbx, vha, 0x10c1, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1, "Entered %s enabling index %d.\n", __func__, vp_index); if (vp_index == 0 || vp_index >= ha->max_npiv_vports) @@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) le16_to_cpu(vce->comp_status)); rval = QLA_FUNCTION_FAILED; } else { - ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6, + "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, vce, vce_dma); @@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); - - /* - * This command is implicitly executed by firmware during login for the - * physical hosts - */ - if (vp_idx == 0) - return QLA_FUNCTION_FAILED; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; mcp->mb[1] = format; @@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, + "Entered %s.\n", __func__); if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; @@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, ql_dbg(ql_dbg_mbx, vha, 0x1008, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, + "Done %s.\n", __func__); } return rval; @@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) unsigned long flags; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, + "Entered %s.\n", __func__); mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (mn == NULL) { @@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) status[0] = le16_to_cpu(mn->p.rsp.comp_status); status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 
le16_to_cpu(mn->p.rsp.failure_code) : 0; - ql_dbg(ql_dbg_mbx, vha, 0x10ce, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, "cs=%x fc=%x.\n", status[0], status[1]); if (status[0] != CS_COMPLETE) { @@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) retry = 1; } } else { - ql_dbg(ql_dbg_mbx, vha, 0x10d0, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, "Firmware updated to %x.\n", le32_to_cpu(mn->p.rsp.fw_ver)); @@ -3316,9 +3501,11 @@ verify_done: dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); + ql_dbg(ql_dbg_mbx, vha, 0x10d1, + "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, + "Done %s.\n", __func__); } return rval; @@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; @@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) ql_dbg(ql_dbg_mbx, vha, 0x10d4, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, + "Done %s.\n", __func__); } return rval; @@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; @@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) ql_dbg(ql_dbg_mbx, vha, 0x10d7, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, + "Done %s.\n", __func__); } return rval; @@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_IDC_ACK; memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); @@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) ql_dbg(ql_dbg_mbx, vha, 0x10da, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, + "Done %s.\n", __func__); } return rval; @@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, + "Entered %s.\n", __func__); if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; @@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, + "Done %s.\n", __func__); 
*sector_size = mcp->mb[1]; } @@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : @@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, + "Done %s.\n", __func__); } return rval; @@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; @@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, + "Done %s.\n", __func__); } return rval; @@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_MPI_FW; mcp->out_mb = MBX_0; @@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, + "Done %s.\n", __func__); } return rval; @@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); } return rval; @@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, ql_dbg(ql_dbg_mbx, vha, 0x10ec, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, + "Done %s.\n", __func__); } return rval; @@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, mbx_cmd_t mc; 
mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, + "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, + "Done %s.\n", __func__); *actual_size = mcp->mb[2] << 2; @@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, + "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, + "Done %s.\n", __func__); } return rval; @@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) ql_dbg(ql_dbg_mbx, vha, 0x10f5, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, + "Done %s.\n", __func__); *data = mcp->mb[3] << 16 | mcp->mb[2]; } @@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, mbx_cmd_t *mcp = &mc; uint32_t iter_cnt = 0x1; - ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, + "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; @@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, + "Done %s.\n", __func__); } /* Copy mailbox information */ @@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, + "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; @@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, + "Done %s.\n", __func__); } /* Copy mailbox information */ @@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x10fd, + ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x10fd, "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); mcp->mb[0] = MBC_ISP84XX_RESET; @@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); else - ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, + "Done %s.\n", __func__); return rval; } @@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; @@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) ql_dbg(ql_dbg_mbx, vha, 0x1101, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, + "Done %s.\n", __func__); } return rval; @@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) rval = QLA_SUCCESS; - ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, + "Entered %s.\n", __func__); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); @@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) ql_dbg(ql_dbg_mbx, vha, 0x1104, "Failed=%x mb[0]=%x.\n", rval, mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, + "Done %s.\n", __func__); } return rval; @@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x1107, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, + "Done %s.\n", __func__); if (mcp->mb[1] != 0x7) ha->link_data_rate = mcp->mb[1]; } @@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, + "Entered %s.\n", __func__); if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; @@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) /* Copy all bits to preserve original value */ memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); - ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, + "Done %s.\n", __func__); } return rval; } @@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_PORT_CONFIG; /* Copy all bits to preserve original setting */ @@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) ql_dbg(ql_dbg_mbx, vha, 0x110d, "Failed=%x 
mb[0]=%x.\n", rval, mcp->mb[0]); } else - ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, + "Done %s.\n", __func__); return rval; } @@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, + "Entered %s.\n", __func__); if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) return QLA_FUNCTION_FAILED; @@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); } else { - ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, + "Done %s.\n", __func__); } return rval; @@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) uint8_t byte; struct qla_hw_data *ha = vha->hw; - ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, + "Entered %s.\n", __func__); /* Integer part */ rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); @@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) } *frac = (byte >> 6) * 25; - ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, + "Done %s.\n", __func__); fail: return rval; } @@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, + "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; @@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x1016, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, + "Done %s.\n", __func__); } return rval; @@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, + "Entered %s.\n", __func__); if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; @@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x100c, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, + "Done %s.\n", __func__); } return rval; @@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, + "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); @@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha) (mcp->mb[1] << 16) | mcp->mb[0], (mcp->mb[3] << 16) | mcp->mb[2]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, + "Done %s.\n", __func__); ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); if (!ha->md_template_size) { ql_dbg(ql_dbg_mbx, vha, 0x1122, @@ 
-4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, + "Entered %s.\n", __func__); ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); @@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha) ((mcp->mb[1] << 16) | mcp->mb[0]), ((mcp->mb[3] << 16) | mcp->mb[2])); } else - ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, + "Done %s.\n", __func__); return rval; } @@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, + "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; @@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) ql_dbg(ql_dbg_mbx, vha, 0x1134, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, + "Done %s.\n", __func__); } return rval; @@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, + "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_GET_LED_CONFIG; @@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) led_cfg[4] = mcp->mb[5]; led_cfg[5] = mcp->mb[6]; } - ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, + "Done %s.\n", __func__); } return rval; @@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x1127, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); @@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) ql_dbg(ql_dbg_mbx, vha, 0x1128, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1129, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, "Done %s.\n", __func__); } @@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, + "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_REMOTE_REG; mcp->mb[1] = LSW(reg); @@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) ql_dbg(ql_dbg_mbx, vha, 0x1131, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { - ql_dbg(ql_dbg_mbx, vha, 0x1132, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, "Done %s.\n", __func__); } @@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) mbx_cmd_t *mcp = &mc; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { - ql_dbg(ql_dbg_mbx, vha, 0x113b, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, "Implicit LOGO Unsupported.\n"); return QLA_FUNCTION_FAILED; } - ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done 
%s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, + "Entering %s.\n", __func__); /* Perform Implicit LOGO. */ mcp->mb[0] = MBC_PORT_LOGOUT; @@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) ql_dbg(ql_dbg_mbx, vha, 0x113d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); else - ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__); + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, + "Done %s.\n", __func__); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index aa062a1b0ca4..3e8b32419e68 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -6,6 +6,7 @@ */ #include "qla_def.h" #include "qla_gbl.h" +#include "qla_target.h" #include <linux/moduleparam.h> #include <linux/vmalloc.h> @@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); + + qlt_update_vp_map(vha, SET_VP_IDX); + spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); @@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->vport_slock, flags); } list_del(&vha->list); + qlt_update_vp_map(vha, RESET_VP_IDX); spin_unlock_irqrestore(&ha->vport_slock, flags); vp_id = vha->vp_idx; @@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) list_for_each_entry(fcport, &vha->vp_fcports, list) { ql_dbg(ql_dbg_vport, vha, 0xa001, "Marking port dead, loop_id=0x%04x : %x.\n", - fcport->loop_id, fcport->vp_idx); + fcport->loop_id, fcport->vha->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); @@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + /* Remove port id from vp target map */ + qlt_update_vp_map(vha, RESET_AL_PA); + qla2x00_mark_vp_devices_dead(vha); atomic_set(&vha->vp_state, VP_FAILED); vha->flags.management_server_logged_in = 0; @@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { - ql_dbg(ql_dbg_dpc, vha, 0x4012, - "Entering %s.\n", __func__); - ql_dbg(ql_dbg_dpc, vha, 0x4013, - "vp_flags: 0x%lx.\n", vha->vp_flags); + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, + "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); qla2x00_do_work(vha); @@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) } } - ql_dbg(ql_dbg_dpc, vha, 0x401c, + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, "Exiting %s.\n", __func__); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index de722a933438..caf627ba7fa8 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) } /* Offset in flash = lower 16 bits - * Number of enteries = upper 16 bits + * Number of entries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; - /* number of addr/value pair should not exceed 1024 enteries */ + /* number of addr/value pair should not exceed 1024 entries */ if (n >= 1024) { ql_log(ql_log_fatal, vha, 0x0071, "Card flash not initialized:n=0x%x.\n", n); @@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id) rsp = (struct rsp_que *) dev_id; if (!rsp) { - ql_log(ql_log_info, NULL, 0xb054, + ql_log(ql_log_info, NULL, 0xb053, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } @@ -2446,7 
+2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha) if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a1, - "Firmware loaded successully from flash.\n"); + "Firmware loaded successfully from flash.\n"); return QLA_SUCCESS; } else { ql_log(ql_log_warn, vha, 0x0108, @@ -2461,7 +2461,7 @@ try_blob_fw: blob = ha->hablob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_fatal, vha, 0x00a3, - "Firmware image not preset.\n"); + "Firmware image not present.\n"); goto fw_load_failed; } @@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, if (!optrom) { ql_log(ql_log_warn, vha, 0xb01b, "Unable to allocate memory " - "for optron burst write (%x KB).\n", + "for optrom burst write (%x KB).\n", OPTROM_BURST_SIZE / 1024); } } @@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) * changing the state to DEV_READY */ ql_log(ql_log_info, vha, 0xb023, - "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); - ql_log(ql_log_info, vha, 0xb024, - "DRV_ACTIVE:%d DRV_STATE:%d.\n", + "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " + "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, drv_active, drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); @@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) if (ql2xmdenable) { if (qla82xx_md_collect(vha)) ql_log(ql_log_warn, vha, 0xb02c, - "Not able to collect minidump.\n"); + "Minidump not collected.\n"); } else ql_log(ql_log_warn, vha, 0xb04f, "Minidump disabled.\n"); @@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha) "Firmware version differs " "Previous version: %d:%d:%d - " "New version: %d:%d:%d\n", + fw_major_version, fw_minor_version, + fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, - ha->fw_subminor_version, - fw_major_version, fw_minor_version, - fw_subminor_version); + ha->fw_subminor_version); /* Release MiniDump resources */ qla82xx_md_free(vha); /* ALlocate MiniDump resources */ @@ -3325,6 +3324,30 @@ exit: return rval; } +static int qla82xx_check_temp(scsi_qla_host_t *vha) +{ + uint32_t temp, temp_state, temp_val; + struct qla_hw_data *ha = vha->hw; + + temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); + temp_state = qla82xx_get_temp_state(temp); + temp_val = qla82xx_get_temp_val(temp); + + if (temp_state == QLA82XX_TEMP_PANIC) { + ql_log(ql_log_warn, vha, 0x600e, + "Device temperature %d degrees C exceeds " + "maximum allowed. Hardware has been shut down.\n", + temp_val); + return 1; + } else if (temp_state == QLA82XX_TEMP_WARN) { + ql_log(ql_log_warn, vha, 0x600f, + "Device temperature %d degrees C exceeds " + "operating range. 
Immediate action needed.\n", + temp_val); + } + return 0; +} + void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; @@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) /* don't poll if reset is going on */ if (!ha->flags.isp82xx_reset_hdlr_active) { dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - if (dev_state == QLA82XX_DEV_NEED_RESET && + if (qla82xx_check_temp(vha)) { + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla82xx_clear_pending_mbx(vha); + } else if (dev_state == QLA82XX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6001, "Adapter reset needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6002, "Quiescent needed.\n"); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); } else { if (qla82xx_check_fw_alive(vha)) { ql_dbg(ql_dbg_timer, vha, 0x6011, @@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } - qla2xxx_wake_dpc(vha); ha->flags.isp82xx_fw_hung = 1; ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); @@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha) goto md_failed; } + if (ha->flags.isp82xx_no_md_cap) { + ql_log(ql_log_warn, vha, 0xb054, + "Forced reset from application, " + "ignore minidump capture\n"); + ha->flags.isp82xx_no_md_cap = 0; + goto md_failed; + } + if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb039, "Template checksum validation error\n"); diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 4ac50e274661..6eb210e3cc63 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -26,6 +26,7 @@ #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) #define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) +#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) #define QLA82XX_DMA_SHIFT_VALUE 0x55555555 #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 @@ -561,7 +562,6 @@ #define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) #define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) -#define PCIE_CHICKEN3 (0x120c8) #define PCIE_SETUP_FUNCTION (0x12040) #define PCIE_SETUP_FUNCTION2 (0x12048) @@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P0 0x1 #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 +#define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_state(x) ((x) & 0xffff) +#define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ + QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. 
*/ +}; #endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c9c56a8427f3..6d1d873a20e2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -13,12 +13,13 @@ #include <linux/mutex.h> #include <linux/kobject.h> #include <linux/slab.h> - #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> +#include "qla_target.h" + /* * Driver version */ @@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep; */ int ql_errlev = ql_log_all; +int ql2xenableclass2; +module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xenableclass2, + "Specify if Class 2 operations are supported from the very " + "beginning. Default is 0 - class 2 not supported."); + int ql2xlogintimeout = 20; module_param(ql2xlogintimeout, int, S_IRUGO); MODULE_PARM_DESC(ql2xlogintimeout, @@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = { .max_sectors = 0xFFFF, .shost_attrs = qla2x00_host_attrs, + + .supported_mode = MODE_INITIATOR, }; static struct scsi_transport_template *qla2xxx_transport_template = NULL; @@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *); static void qla2x00_mem_free(struct qla_hw_data *); /* -------------------------------------------------------------------------- */ -static int qla2x00_alloc_queues(struct qla_hw_data *ha) +static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, + struct rsp_que *rsp) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, @@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha) "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } + /* + * Make sure we record at least the request and response queue zero in + * case we need to free them if part of the probe fails. 
+ */ + ha->rsp_q_map[0] = rsp; + ha->req_q_map[0] = req; set_bit(0, ha->rsp_qid_map); set_bit(0, ha->req_qid_map); return 1; @@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) { - ql_dbg(ql_dbg_io, vha, 0x3001, + ql_dbg(ql_dbg_aer, vha, 0x9010, "PCI Channel IO permanent failure, exiting " "cmd=%p.\n", cmd); cmd->result = DID_NO_CONNECT << 16; } else { - ql_dbg(ql_dbg_io, vha, 0x3002, + ql_dbg(ql_dbg_aer, vha, 0x9011, "EEH_Busy, Requeuing the cmd=%p.\n", cmd); cmd->result = DID_REQUEUE << 16; } @@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) rval = fc_remote_port_chkready(rport); if (rval) { cmd->result = rval; - ql_dbg(ql_dbg_io, vha, 0x3003, + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; @@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) ret = FAILED; ql_log(ql_log_info, vha, 0x8012, - "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun); + "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x8013, @@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_dbg_pci(ql_dbg_init, pdev, 0x000a, "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; + ha->tgt.enable_class_2 = ql2xenableclass2; /* Clear our data area */ ha->bars = bars; @@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_24xx); ha->gid_list_info_size = 8; @@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_24xx); ha->gid_list_info_size = 8; @@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); +que_init: + /* Alloc arrays of request and response ring ptrs */ + if (!qla2x00_alloc_queues(ha, req, rsp)) { + ql_log(ql_log_fatal, base_vha, 0x003d, + "Failed to allocate memory for queue pointers..." + "aborting.\n"); + goto probe_init_failed; + } + + qlt_probe_one_stage1(base_vha, ha); + /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) @@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_save_state(pdev); - /* Alloc arrays of request and response ring ptrs */ -que_init: - if (!qla2x00_alloc_queues(ha)) { - ql_log(ql_log_fatal, base_vha, 0x003d, - "Failed to allocate memory for queue pointers.. aborting.\n"); - goto probe_init_failed; - } - - ha->rsp_q_map[0] = rsp; - ha->req_q_map[0] = req; + /* Assign back pointers */ rsp->req = req; req->rsp = rsp; - set_bit(0, ha->req_qid_map); - set_bit(0, ha->rsp_qid_map); + /* FWI2-capable only. 
*/ req->req_q_in = &ha->iobase->isp24.req_q_in; req->req_q_out = &ha->iobase->isp24.req_q_out; @@ -2514,6 +2534,14 @@ que_init: ql_dbg(ql_dbg_init, base_vha, 0x00ee, "DPC thread started successfully.\n"); + /* + * If we're not coming up in initiator mode, we might sit for + * a while without waking up the dpc thread, which leads to a + * stuck process warning. So just kick the dpc once here and + * let the kthread start (and go back to sleep in qla2x00_do_dpc). + */ + qla2xxx_wake_dpc(base_vha); + skip_dpc: list_add_tail(&base_vha->list, &ha->vp_list); base_vha->host->irq = ha->pdev->irq; @@ -2559,7 +2587,11 @@ skip_dpc: ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); - scsi_scan_host(host); + if (qla_ini_mode_enabled(base_vha)) + scsi_scan_host(host); + else + ql_dbg(ql_dbg_init, base_vha, 0x0122, + "skipping scsi_scan_host() for non-initiator port\n"); qla2x00_alloc_sysfs_attr(base_vha); @@ -2577,11 +2609,17 @@ skip_dpc: base_vha->host_no, ha->isp_ops->fw_version_str(base_vha, fw_str)); + qlt_add_target(ha, base_vha); + return 0; probe_init_failed: qla2x00_free_req_que(ha, req); + ha->req_q_map[0] = NULL; + clear_bit(0, ha->req_qid_map); qla2x00_free_rsp_que(ha, rsp); + ha->rsp_q_map[0] = NULL; + clear_bit(0, ha->rsp_qid_map); ha->max_req_queues = ha->max_rsp_queues = 0; probe_failed: @@ -2621,6 +2659,22 @@ probe_out: } static void +qla2x00_stop_dpc_thread(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct task_struct *t = ha->dpc_thread; + + if (ha->dpc_thread == NULL) + return; + /* + * qla2xxx_wake_dpc checks for ->dpc_thread + * so we need to zero it out. + */ + ha->dpc_thread = NULL; + kthread_stop(t); +} + +static void qla2x00_shutdown(struct pci_dev *pdev) { scsi_qla_host_t *vha; @@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev) struct qla_hw_data *ha; unsigned long flags; + /* + * If the PCI device is disabled, that means that probe failed and any + * resources should have been cleaned up on probe exit. + */ + if (!atomic_read(&pdev->enable_cnt)) + return; + base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; + ha->flags.host_shutting_down = 1; + mutex_lock(&ha->vport_lock); while (ha->cur_vport_count) { struct Scsi_Host *scsi_host; @@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev) ha->dpc_thread = NULL; kthread_stop(t); } + qlt_remove_target(ha, base_vha); qla2x00_free_sysfs_attr(base_vha); @@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha) if (vha->timer_active) qla2x00_stop_timer(vha); - /* Kill the kernel thread for this host */ - if (ha->dpc_thread) { - struct task_struct *t = ha->dpc_thread; - - /* - * qla2xxx_wake_dpc checks for ->dpc_thread - * so we need to zero it out. 
- */ - ha->dpc_thread = NULL; - kthread_stop(t); - } + qla2x00_stop_dpc_thread(vha); qla25xx_delete_queues(vha); @@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, spin_unlock_irqrestore(vha->host->host_lock, flags); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); - } else + } else { fc_remote_port_delete(rport); + qlt_fc_port_deleted(vha, fcport); + } } /* @@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, int do_login, int defer) { if (atomic_read(&fcport->state) == FCS_ONLINE && - vha->vp_idx == fcport->vp_idx) { + vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport, defer); } @@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx) + if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) continue; /* @@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); if (defer) qla2x00_schedule_rport_del(vha, fcport, defer); - else if (vha->vp_idx == fcport->vp_idx) + else if (vha->vp_idx == fcport->vha->vp_idx) qla2x00_schedule_rport_del(vha, fcport, defer); } } @@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->init_cb) goto fail; + if (qlt_mem_alloc(ha) < 0) + goto fail_free_init_cb; + ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); if (!ha->gid_list) - goto fail_free_init_cb; + goto fail_free_tgt_mem; ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (!ha->srb_mempool) @@ -3167,6 +3226,8 @@ fail_free_gid_list: ha->gid_list_dma); ha->gid_list = NULL; ha->gid_list_dma = 0; +fail_free_tgt_mem: + qlt_mem_free(ha); fail_free_init_cb: dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); @@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) if (ha->ctx_mempool) mempool_destroy(ha->ctx_mempool); + qlt_mem_free(ha); + if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); @@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->gid_list = NULL; ha->gid_list_dma = 0; + + ha->tgt.atio_ring = NULL; + ha->tgt.atio_dma = 0; + ha->tgt.tgt_vp_map = NULL; } struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, @@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data) ha->dpc_active = 1; - ql_dbg(ql_dbg_dpc, base_vha, 0x4001, - "DPC handler waking up.\n"); - ql_dbg(ql_dbg_dpc, base_vha, 0x4002, - "dpc_flags=0x%lx.\n", base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, + "DPC handler waking up, dpc_flags=0x%lx.\n", + base_vha->dpc_flags); qla2x00_do_work(base_vha); @@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data) clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); } + if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) { + int ret; + ret = qla2x00_send_change_request(base_vha, 0x3, 0); + if (ret != QLA_SUCCESS) + ql_log(ql_log_warn, base_vha, 0x121, + "Failed to enable receiving of RSCN " + "requests: 0x%x.\n", ret); + clear_bit(SCR_PENDING, &base_vha->dpc_flags); + } + if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4009, "Quiescence mode scheduled.\n"); @@ -4457,6 
+4533,21 @@ qla2x00_module_init(void) return -ENOMEM; } + /* Initialize target kmem_cache and mem_pools */ + ret = qlt_init(); + if (ret < 0) { + kmem_cache_destroy(srb_cachep); + return ret; + } else if (ret > 0) { + /* + * If initiator mode is explicitly disabled by qlt_init(), + * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from + * performing scsi_scan_target() during LOOP UP event. + */ + qla2xxx_transport_functions.disable_target_scan = 1; + qla2xxx_transport_vport_functions.disable_target_scan = 1; + } + /* Derive version string. */ strcpy(qla2x00_version_str, QLA2XXX_VERSION); if (ql2xextended_error_logging) @@ -4468,6 +4559,7 @@ qla2x00_module_init(void) kmem_cache_destroy(srb_cachep); ql_log(ql_log_fatal, NULL, 0x0002, "fc_attach_transport failed...Failing load!.\n"); + qlt_exit(); return -ENODEV; } @@ -4481,6 +4573,7 @@ qla2x00_module_init(void) fc_attach_transport(&qla2xxx_transport_vport_functions); if (!qla2xxx_transport_vport_template) { kmem_cache_destroy(srb_cachep); + qlt_exit(); fc_release_transport(qla2xxx_transport_template); ql_log(ql_log_fatal, NULL, 0x0004, "fc_attach_transport vport failed...Failing load!.\n"); @@ -4492,6 +4585,7 @@ qla2x00_module_init(void) ret = pci_register_driver(&qla2xxx_pci_driver); if (ret) { kmem_cache_destroy(srb_cachep); + qlt_exit(); fc_release_transport(qla2xxx_transport_template); fc_release_transport(qla2xxx_transport_vport_template); ql_log(ql_log_fatal, NULL, 0x0006, @@ -4511,6 +4605,7 @@ qla2x00_module_exit(void) pci_unregister_driver(&qla2xxx_pci_driver); qla2x00_release_firmware(); kmem_cache_destroy(srb_cachep); + qlt_exit(); if (ctx_cachep) kmem_cache_destroy(ctx_cachep); fc_release_transport(qla2xxx_transport_template); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c new file mode 100644 index 000000000000..04f80ebf09eb --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -0,0 +1,4973 @@ +/* + * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx + * + * based on qla2x00t.c code: + * + * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> + * Copyright (C) 2004 - 2005 Leonid Stoljar + * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> + * Copyright (C) 2006 - 2010 ID7 Ltd. + * + * Forward port and refactoring to modern qla2xxx and target/configfs + * + * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2 + * of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/version.h> +#include <linux/blkdev.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <asm/unaligned.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "qla_def.h" +#include "qla_target.h" + +static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; +module_param(qlini_mode, charp, S_IRUGO); +MODULE_PARM_DESC(qlini_mode, + "Determines when initiator mode will be enabled. 
Possible values: " + "\"exclusive\" - initiator mode will be enabled on load, " + "disabled when target mode is enabled, and then enabled again " + "when target mode is disabled; " + "\"disabled\" - initiator mode will never be enabled; " + "\"enabled\" (default) - initiator mode will always stay enabled."); + +static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; + +/* + * From scsi/fc/fc_fcp.h + */ +enum fcp_resp_rsp_codes { + FCP_TMF_CMPL = 0, + FCP_DATA_LEN_INVALID = 1, + FCP_CMND_FIELDS_INVALID = 2, + FCP_DATA_PARAM_MISMATCH = 3, + FCP_TMF_REJECTED = 4, + FCP_TMF_FAILED = 5, + FCP_TMF_INVALID_LUN = 9, +}; + +/* + * fc_pri_ta from scsi/fc/fc_fcp.h + */ +#define FCP_PTA_SIMPLE 0 /* simple task attribute */ +#define FCP_PTA_HEADQ 1 /* head of queue task attribute */ +#define FCP_PTA_ORDERED 2 /* ordered task attribute */ +#define FCP_PTA_ACA 4 /* auto. contingent allegiance */ +#define FCP_PTA_MASK 7 /* mask for task attribute field */ +#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ +#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ + +/* + * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which + * must be called under HW lock and could unlock/lock it inside. + * It isn't an issue, since in the current implementation, at the time + * those functions are called: + * + * - Either context is IRQ and only IRQ handler can modify HW data, + * including rings related fields, + * + * - Or access to target mode variables from struct qla_tgt doesn't + * cross those functions' boundaries, except tgt_stop, which is + * additionally protected by irq_cmd_count. + */ +/* Predefs for callbacks handed to qla2xxx LLD */ +static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, + struct atio_from_isp *pkt); +static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); +static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, + int fn, void *iocb, int flags); +static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd + *cmd, struct atio_from_isp *atio, int ha_locked); +static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, + struct qla_tgt_srr_imm *imm, int ha_lock); +/* + * Global Variables + */ +static struct kmem_cache *qla_tgt_cmd_cachep; +static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; +static mempool_t *qla_tgt_mgmt_cmd_mempool; +static struct workqueue_struct *qla_tgt_wq; +static DEFINE_MUTEX(qla_tgt_mutex); +static LIST_HEAD(qla_tgt_glist); + +/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ +static struct qla_tgt_sess *qlt_find_sess_by_port_name( + struct qla_tgt *tgt, + const uint8_t *port_name) +{ + struct qla_tgt_sess *sess; + + list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) { + if (!memcmp(sess->port_name, port_name, WWN_SIZE)) + return sess; + } + + return NULL; +} + +/* Might release hw lock, then reacquire!! 
*/ +static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) +{ + /* Send marker if required */ + if (unlikely(vha->marker_needed != 0)) { + int rc = qla2x00_issue_marker(vha, vha_locked); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt, vha, 0xe03d, + "qla_target(%d): issue_marker() failed\n", + vha->vp_idx); + } + return rc; + } + return QLA_SUCCESS; +} + +static inline +struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, + uint8_t *d_id) +{ + struct qla_hw_data *ha = vha->hw; + uint8_t vp_idx; + + if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) + return NULL; + + if (vha->d_id.b.al_pa == d_id[2]) + return vha; + + BUG_ON(ha->tgt.tgt_vp_map == NULL); + vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; + if (likely(test_bit(vp_idx, ha->vp_idx_map))) + return ha->tgt.tgt_vp_map[vp_idx].vha; + + return NULL; +} + +static inline +struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, + uint16_t vp_idx) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx == vp_idx) + return vha; + + BUG_ON(ha->tgt.tgt_vp_map == NULL); + if (likely(test_bit(vp_idx, ha->vp_idx_map))) + return ha->tgt.tgt_vp_map[vp_idx].vha; + + return NULL; +} + +void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, + struct atio_from_isp *atio) +{ + switch (atio->u.raw.entry_type) { + case ATIO_TYPE7: + { + struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, + atio->u.isp24.fcp_hdr.d_id); + if (unlikely(NULL == host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe03e, + "qla_target(%d): Received ATIO_TYPE7 " + "with unknown d_id %x:%x:%x\n", vha->vp_idx, + atio->u.isp24.fcp_hdr.d_id[0], + atio->u.isp24.fcp_hdr.d_id[1], + atio->u.isp24.fcp_hdr.d_id[2]); + break; + } + qlt_24xx_atio_pkt(host, atio); + break; + } + + case IMMED_NOTIFY_TYPE: + { + struct scsi_qla_host *host = vha; + struct imm_ntfy_from_isp *entry = + (struct imm_ntfy_from_isp *)atio; + + if ((entry->u.isp24.vp_index != 0xFF) && + (entry->u.isp24.nport_handle != 0xFFFF)) { + host = qlt_find_host_by_vp_idx(vha, + entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe03f, + "qla_target(%d): Received " + "ATIO (IMMED_NOTIFY_TYPE) " + "with unknown vp_index %d\n", + vha->vp_idx, entry->u.isp24.vp_index); + break; + } + } + qlt_24xx_atio_pkt(host, atio); + break; + } + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe040, + "qla_target(%d): Received unknown ATIO packet " + "type %x\n", vha->vp_idx, atio->u.raw.entry_type); + break; + } + + return; +} + +void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) +{ + switch (pkt->entry_type) { + case CTIO_TYPE7: + { + struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe041, + "qla_target(%d): Response pkt (CTIO_TYPE7) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, pkt); + break; + } + + case IMMED_NOTIFY_TYPE: + { + struct scsi_qla_host *host = vha; + struct imm_ntfy_from_isp *entry = + (struct imm_ntfy_from_isp *)pkt; + + host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe042, + "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " + "received, with unknown vp_index %d\n", + vha->vp_idx, entry->u.isp24.vp_index); + break; + } + qlt_response_pkt(host, pkt); + break; + } + + case NOTIFY_ACK_TYPE: + { + struct scsi_qla_host *host = vha; + struct 
nack_to_isp *entry = (struct nack_to_isp *)pkt; + + if (0xFF != entry->u.isp24.vp_index) { + host = qlt_find_host_by_vp_idx(vha, + entry->u.isp24.vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe043, + "qla_target(%d): Response " + "pkt (NOTIFY_ACK_TYPE) " + "received, with unknown " + "vp_index %d\n", vha->vp_idx, + entry->u.isp24.vp_index); + break; + } + } + qlt_response_pkt(host, pkt); + break; + } + + case ABTS_RECV_24XX: + { + struct abts_recv_from_24xx *entry = + (struct abts_recv_from_24xx *)pkt; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe044, + "qla_target(%d): Response pkt " + "(ABTS_RECV_24XX) received, with unknown " + "vp_index %d\n", vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, pkt); + break; + } + + case ABTS_RESP_24XX: + { + struct abts_resp_to_24xx *entry = + (struct abts_resp_to_24xx *)pkt; + struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + entry->vp_index); + if (unlikely(!host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe045, + "qla_target(%d): Response pkt " + "(ABTS_RESP_24XX) received, with unknown " + "vp_index %d\n", vha->vp_idx, entry->vp_index); + break; + } + qlt_response_pkt(host, pkt); + break; + } + + default: + qlt_response_pkt(vha, pkt); + break; + } + +} + +static void qlt_free_session_done(struct work_struct *work) +{ + struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, + free_work); + struct qla_tgt *tgt = sess->tgt; + struct scsi_qla_host *vha = sess->vha; + struct qla_hw_data *ha = vha->hw; + + BUG_ON(!tgt); + /* + * Release the target session for FC Nexus from fabric module code. + */ + if (sess->se_sess != NULL) + ha->tgt.tgt_ops->free_session(sess); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, + "Unregistration of sess %p finished\n", sess); + + kfree(sess); + /* + * We need to protect against a race where tgt is freed before or + * inside wake_up() + */ + tgt->sess_count--; + if (tgt->sess_count == 0) + wake_up_all(&tgt->waitQ); +} + +/* ha->hardware_lock supposed to be held on entry */ +void qlt_unreg_sess(struct qla_tgt_sess *sess) +{ + struct scsi_qla_host *vha = sess->vha; + + vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); + + list_del(&sess->sess_list_entry); + if (sess->deleted) + list_del(&sess->del_list_entry); + + INIT_WORK(&sess->free_work, qlt_free_session_done); + schedule_work(&sess->free_work); +} +EXPORT_SYMBOL(qlt_unreg_sess); + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + uint32_t unpacked_lun, lun = 0; + uint16_t loop_id; + int res = 0; + struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + + loop_id = le16_to_cpu(n->u.isp24.nport_handle); + if (loop_id == 0xFFFF) { +#if 0 /* FIXME: Re-enable Global event handling.. 
*/ + /* Global event */ + atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); + qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); + if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { + sess = list_entry(ha->tgt.qla_tgt->sess_list.next, + typeof(*sess), sess_list_entry); + switch (mcmd) { + case QLA_TGT_NEXUS_LOSS_SESS: + mcmd = QLA_TGT_NEXUS_LOSS; + break; + case QLA_TGT_ABORT_ALL_SESS: + mcmd = QLA_TGT_ABORT_ALL; + break; + case QLA_TGT_NEXUS_LOSS: + case QLA_TGT_ABORT_ALL: + break; + default: + ql_dbg(ql_dbg_tgt, vha, 0xe046, + "qla_target(%d): Not allowed " + "command %x in %s", vha->vp_idx, + mcmd, __func__); + sess = NULL; + break; + } + } else + sess = NULL; +#endif + } else { + sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + } + + ql_dbg(ql_dbg_tgt, vha, 0xe000, + "Using sess for qla_tgt_reset: %p\n", sess); + if (!sess) { + res = -ESRCH; + return res; + } + + ql_dbg(ql_dbg_tgt, vha, 0xe047, + "scsi(%ld): resetting (session %p from port " + "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " + "mcmd %x, loop_id %d)\n", vha->host_no, sess, + sess->port_name[0], sess->port_name[1], + sess->port_name[2], sess->port_name[3], + sess->port_name[4], sess->port_name[5], + sess->port_name[6], sess->port_name[7], + mcmd, loop_id); + + lun = a->u.isp24.fcp_cmnd.lun; + unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + + return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, + iocb, QLA24XX_MGMT_SEND_NACK); +} + +/* ha->hardware_lock supposed to be held on entry */ +static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, + bool immediate) +{ + struct qla_tgt *tgt = sess->tgt; + uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; + + if (sess->deleted) + return; + + ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + "Scheduling sess %p for deletion\n", sess); + list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); + sess->deleted = 1; + + if (immediate) + dev_loss_tmo = 0; + + sess->expires = jiffies + dev_loss_tmo * HZ; + + ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, + "qla_target(%d): session for port %02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for " + "deletion in %u secs (expires: %lu) immed: %d\n", + sess->vha->vp_idx, + sess->port_name[0], sess->port_name[1], + sess->port_name[2], sess->port_name[3], + sess->port_name[4], sess->port_name[5], + sess->port_name[6], sess->port_name[7], + sess->loop_id, dev_loss_tmo, sess->expires, immediate); + + if (immediate) + schedule_delayed_work(&tgt->sess_del_work, 0); + else + schedule_delayed_work(&tgt->sess_del_work, + sess->expires - jiffies); +} + +/* ha->hardware_lock supposed to be held on entry */ +static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) +{ + struct qla_tgt_sess *sess; + + list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) + qlt_schedule_sess_for_deletion(sess, true); + + /* At this point tgt could already be dead */ +} + +static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, + uint16_t *loop_id) +{ + struct qla_hw_data *ha = vha->hw; + dma_addr_t gid_list_dma; + struct gid_list_info *gid_list; + char *id_iter; + int res, rc, i; + uint16_t entries; + + gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); + if (!gid_list) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, + "qla_target(%d): DMA alloc of %u bytes failed\n", + vha->vp_idx, qla2x00_gid_list_size(ha)); + return -ENOMEM; + } + + /* Get list of logged in devices */ + rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); + if (rc != QLA_SUCCESS) { + 
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, + "qla_target(%d): get_id_list() failed: %x\n", + vha->vp_idx, rc); + res = -1; + goto out_free_id_list; + } + + id_iter = (char *)gid_list; + res = -1; + for (i = 0; i < entries; i++) { + struct gid_list_info *gid = (struct gid_list_info *)id_iter; + if ((gid->al_pa == s_id[2]) && + (gid->area == s_id[1]) && + (gid->domain == s_id[0])) { + *loop_id = le16_to_cpu(gid->loop_id); + res = 0; + break; + } + id_iter += ha->gid_list_info_size; + } + +out_free_id_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), + gid_list, gid_list_dma); + return res; +} + +static bool qlt_check_fcport_exist(struct scsi_qla_host *vha, + struct qla_tgt_sess *sess) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_port_24xx_data *pmap24; + bool res, found = false; + int rc, i; + uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */ + uint16_t entries; + void *pmap; + int pmap_len; + fc_port_t *fcport; + int global_resets; + +retry: + global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); + + rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len); + if (rc != QLA_SUCCESS) { + res = false; + goto out; + } + + pmap24 = pmap; + entries = pmap_len/sizeof(*pmap24); + + for (i = 0; i < entries; ++i) { + if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) { + loop_id = le16_to_cpu(pmap24[i].loop_id); + found = true; + break; + } + } + + kfree(pmap); + + if (!found) { + res = false; + goto out; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046, + "qlt_check_fcport_exist(): loop_id %d", loop_id); + + fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + if (fcport == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047, + "qla_target(%d): Allocation of tmp FC port failed", + vha->vp_idx); + res = false; + goto out; + } + + fcport->loop_id = loop_id; + + rc = qla2x00_get_port_database(vha, fcport, 0); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048, + "qla_target(%d): Failed to retrieve fcport " + "information -- get_port_database() returned %x " + "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); + res = false; + goto out_free_fcport; + } + + if (global_resets != + atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, + "qla_target(%d): global reset during session discovery" + " (counter was %d, new %d), retrying", + vha->vp_idx, global_resets, + atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); + goto retry; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, + "Updating sess %p (s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, " + "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa, + sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); + + sess->s_id = fcport->d_id; + sess->loop_id = fcport->loop_id; + sess->conf_compl_supported = !!(fcport->flags & + FCF_CONF_COMP_SUPPORTED); + + res = true; + +out_free_fcport: + kfree(fcport); + +out: + return res; +} + +/* ha->hardware_lock supposed to be held on entry */ +static void qlt_undelete_sess(struct qla_tgt_sess *sess) +{ + BUG_ON(!sess->deleted); + + list_del(&sess->del_list_entry); + sess->deleted = 0; +} + +static void qlt_del_sess_work_fn(struct delayed_work *work) +{ + struct qla_tgt *tgt = container_of(work, struct qla_tgt, + sess_del_work); + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + unsigned long flags; + + spin_lock_irqsave(&ha->hardware_lock, flags); + while (!list_empty(&tgt->del_sess_list)) { + sess = 
list_entry(tgt->del_sess_list.next, typeof(*sess), + del_list_entry); + if (time_after_eq(jiffies, sess->expires)) { + bool cancel; + + qlt_undelete_sess(sess); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + cancel = qlt_check_fcport_exist(vha, sess); + + if (cancel) { + if (sess->deleted) { + /* + * sess was deleted again while we were + * discovering it + */ + spin_lock_irqsave(&ha->hardware_lock, + flags); + continue; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049, + "qla_target(%d): cancel deletion of " + "session for port %02x:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x (loop ID %d), because " + "it wasn't deleted by firmware", + vha->vp_idx, sess->port_name[0], + sess->port_name[1], sess->port_name[2], + sess->port_name[3], sess->port_name[4], + sess->port_name[5], sess->port_name[6], + sess->port_name[7], sess->loop_id); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, + "Timeout: sess %p about to be deleted\n", + sess); + ha->tgt.tgt_ops->shutdown_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + } else { + schedule_delayed_work(&tgt->sess_del_work, + sess->expires - jiffies); + break; + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +/* + * Adds an extra ref to allow dropping the hw lock after adding sess to the list. + * Caller must put it. + */ +static struct qla_tgt_sess *qlt_create_sess( + struct scsi_qla_host *vha, + fc_port_t *fcport, + bool local) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + unsigned long flags; + unsigned char be_sid[3]; + + /* Check to avoid double sessions */ + spin_lock_irqsave(&ha->hardware_lock, flags); + list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, + sess_list_entry) { + if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, + "Double sess %p found (s_id %x:%x:%x, " + "loop_id %d), updating to d_id %x:%x:%x, " + "loop_id %d", sess, sess->s_id.b.domain, + sess->s_id.b.al_pa, sess->s_id.b.area, + sess->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.al_pa, fcport->d_id.b.area, + fcport->loop_id); + + if (sess->deleted) + qlt_undelete_sess(sess); + + kref_get(&sess->se_sess->sess_kref); + sess->s_id = fcport->d_id; + sess->loop_id = fcport->loop_id; + sess->conf_compl_supported = !!(fcport->flags & + FCF_CONF_COMP_SUPPORTED); + if (sess->local && !local) + sess->local = 0; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return sess; + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, + "qla_target(%u): session allocation failed, " + "all commands from port %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x will be refused", vha->vp_idx, + fcport->port_name[0], fcport->port_name[1], + fcport->port_name[2], fcport->port_name[3], + fcport->port_name[4], fcport->port_name[5], + fcport->port_name[6], fcport->port_name[7]); + + return NULL; + } + sess->tgt = ha->tgt.qla_tgt; + sess->vha = vha; + sess->s_id = fcport->d_id; + sess->loop_id = fcport->loop_id; + sess->local = local; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, + "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", + sess, ha->tgt.qla_tgt); + + be_sid[0] = sess->s_id.b.domain; + be_sid[1] = sess->s_id.b.area; + be_sid[2] = sess->s_id.b.al_pa; + /* + * Determine if this fc_port->port_name is allowed to access + * target mode using explicit NodeACLs+MappedLUNs, or using + * TPG demo mode. 
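Demo mode, when enabled on the target, accepts any + * initiator WWPN without an explicit NodeACL. 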
If this is successful a target mode FC nexus + * is created. + */ + if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, + &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) { + kfree(sess); + return NULL; + } + /* + * Take an extra reference to ->sess_kref here to handle qla_tgt_sess + * access across ->hardware_lock reaquire. + */ + kref_get(&sess->se_sess->sess_kref); + + sess->conf_compl_supported = !!(fcport->flags & + FCF_CONF_COMP_SUPPORTED); + BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); + memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); + + spin_lock_irqsave(&ha->hardware_lock, flags); + list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); + ha->tgt.qla_tgt->sess_count++; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, + "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed" + " completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name[0], + fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], + fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], + fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain, + sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ? + "" : "not "); + + return sess; +} + +/* + * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() + */ +void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_sess *sess; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + if (!tgt || (fcport->port_type != FCT_INITIATOR)) + return; + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); + if (!sess) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + mutex_lock(&ha->tgt.tgt_mutex); + sess = qlt_create_sess(vha, fcport, false); + mutex_unlock(&ha->tgt.tgt_mutex); + + spin_lock_irqsave(&ha->hardware_lock, flags); + } else { + kref_get(&sess->se_sess->sess_kref); + + if (sess->deleted) { + qlt_undelete_sess(sess); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, + "qla_target(%u): %ssession for port %02x:" + "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " + "reappeared\n", vha->vp_idx, sess->local ? 
"local " + : "", sess->port_name[0], sess->port_name[1], + sess->port_name[2], sess->port_name[3], + sess->port_name[4], sess->port_name[5], + sess->port_name[6], sess->port_name[7], + sess->loop_id); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, + "Reappeared sess %p\n", sess); + } + sess->s_id = fcport->d_id; + sess->loop_id = fcport->loop_id; + sess->conf_compl_supported = !!(fcport->flags & + FCF_CONF_COMP_SUPPORTED); + } + + if (sess && sess->local) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, + "qla_target(%u): local session for " + "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " + "(loop ID %d) became global\n", vha->vp_idx, + fcport->port_name[0], fcport->port_name[1], + fcport->port_name[2], fcport->port_name[3], + fcport->port_name[4], fcport->port_name[5], + fcport->port_name[6], fcport->port_name[7], + sess->loop_id); + sess->local = 0; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); +} + +void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_sess *sess; + unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; + + if (!tgt || (fcport->port_type != FCT_INITIATOR)) + return; + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (tgt->tgt_stop) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); + if (!sess) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); + + sess->local = 1; + qlt_schedule_sess_for_deletion(sess, false); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static inline int test_tgt_sess_count(struct qla_tgt *tgt) +{ + struct qla_hw_data *ha = tgt->ha; + unsigned long flags; + int res; + /* + * We need to protect against race, when tgt is freed before or + * inside wake_up() + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, + "tgt %p, empty(sess_list)=%d sess_count=%d\n", + tgt, list_empty(&tgt->sess_list), tgt->sess_count); + res = (tgt->sess_count == 0); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return res; +} + +/* Called by tcm_qla2xxx configfs code */ +void qlt_stop_phase1(struct qla_tgt *tgt) +{ + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = tgt->ha; + unsigned long flags; + + if (tgt->tgt_stop || tgt->tgt_stopped) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, + "Already in tgt->tgt_stop or tgt_stopped state\n"); + dump_stack(); + return; + } + + ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", + vha->host_no, vha); + /* + * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. + * Lock is needed, because we still can get an incoming packet. 
+ */ + mutex_lock(&ha->tgt.tgt_mutex); + spin_lock_irqsave(&ha->hardware_lock, flags); + tgt->tgt_stop = 1; + qlt_clear_tgt_db(tgt, true); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + mutex_unlock(&ha->tgt.tgt_mutex); + + flush_delayed_work_sync(&tgt->sess_del_work); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, + "Waiting for sess works (tgt %p)", tgt); + spin_lock_irqsave(&tgt->sess_work_lock, flags); + while (!list_empty(&tgt->sess_works_list)) { + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + flush_scheduled_work(); + spin_lock_irqsave(&tgt->sess_work_lock, flags); + } + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, + "Waiting for tgt %p: list_empty(sess_list)=%d " + "sess_count=%d\n", tgt, list_empty(&tgt->sess_list), + tgt->sess_count); + + wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); + + /* Big hammer */ + if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha)) + qlt_disable_vha(vha); + + /* Wait for sessions to clear out (just in case) */ + wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); +} +EXPORT_SYMBOL(qlt_stop_phase1); + +/* Called by tcm_qla2xxx configfs code */ +void qlt_stop_phase2(struct qla_tgt *tgt) +{ + struct qla_hw_data *ha = tgt->ha; + unsigned long flags; + + if (tgt->tgt_stopped) { + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, + "Already in tgt->tgt_stopped state\n"); + dump_stack(); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, + "Waiting for %d IRQ commands to complete (tgt %p)", + tgt->irq_cmd_count, tgt); + + mutex_lock(&ha->tgt.tgt_mutex); + spin_lock_irqsave(&ha->hardware_lock, flags); + while (tgt->irq_cmd_count != 0) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + udelay(2); + spin_lock_irqsave(&ha->hardware_lock, flags); + } + tgt->tgt_stop = 0; + tgt->tgt_stopped = 1; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + mutex_unlock(&ha->tgt.tgt_mutex); + + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", + tgt); +} +EXPORT_SYMBOL(qlt_stop_phase2); + +/* Called from qlt_remove_target() -> qla2x00_remove_one() */ +void qlt_release(struct qla_tgt *tgt) +{ + struct qla_hw_data *ha = tgt->ha; + + if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) + qlt_stop_phase2(tgt); + + ha->tgt.qla_tgt = NULL; + + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, + "Release of tgt %p finished\n", tgt); + + kfree(tgt); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, + const void *param, unsigned int param_size) +{ + struct qla_tgt_sess_work_param *prm; + unsigned long flags; + + prm = kzalloc(sizeof(*prm), GFP_ATOMIC); + if (!prm) { + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, + "qla_target(%d): Unable to create session " + "work, command will be refused", 0); + return -ENOMEM; + } + + ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, + "Scheduling work (type %d, prm %p)" + " to find session for param %p (size %d, tgt %p)\n", + type, prm, param, param_size, tgt); + + prm->type = type; + memcpy(&prm->tm_iocb, param, param_size); + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + schedule_work(&tgt->sess_work); + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire + */ +static void qlt_send_notify_ack(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) +{ + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + struct nack_to_isp *nack; + + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); + + /* Send marker if required */ + if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) + return; + + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + if (!pkt) { + ql_dbg(ql_dbg_tgt, vha, 0xe049, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return; + } + + if (ha->tgt.qla_tgt != NULL) + ha->tgt.qla_tgt->notify_ack_expected++; + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); + nack->u.isp24.srr_reject_code = srr_reject_code; + nack->u.isp24.srr_reject_code_expl = srr_explan; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + ql_dbg(ql_dbg_tgt, vha, 0xe005, + "qla_target(%d): Sending 24xx Notify Ack %d\n", + vha->vp_idx, nack->u.isp24.status); + + qla2x00_start_iocbs(vha, vha->req); +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire + */ +static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, + struct abts_recv_from_24xx *abts, uint32_t status, + bool ids_reversed) +{ + struct qla_hw_data *ha = vha->hw; + struct abts_resp_to_24xx *resp; + uint32_t f_ctl; + uint8_t *p; + + ql_dbg(ql_dbg_tgt, vha, 0xe006, + "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n", + ha, abts, status); + + /* Send marker if required */ + if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) + return; + + resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (!resp) { + ql_dbg(ql_dbg_tgt, vha, 0xe04a, + "qla_target(%d): %s failed: unable to allocate " + "request packet", vha->vp_idx, __func__); + return; + } + + resp->entry_type = ABTS_RESP_24XX; + resp->entry_count = 1; + resp->nport_handle = abts->nport_handle; + resp->vp_index = vha->vp_idx; + resp->sof_type = abts->sof_type; + resp->exchange_address = abts->exchange_address; + resp->fcp_hdr_le = abts->fcp_hdr_le; + f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | + F_CTL_LAST_SEQ | F_CTL_END_SEQ | + F_CTL_SEQ_INITIATIVE); + p = (uint8_t *)&f_ctl; + resp->fcp_hdr_le.f_ctl[0] = *p++; + resp->fcp_hdr_le.f_ctl[1] = *p++; + resp->fcp_hdr_le.f_ctl[2] = *p; + if (ids_reversed) { + resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; + resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; + resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; + resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; + resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; + resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; + } else { + resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; + resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; + resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; + resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; + resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; + resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; + } + resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; + if (status == FCP_TMF_CMPL) { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; + resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; + resp->payload.ba_acct.low_seq_cnt = 0x0000; + resp->payload.ba_acct.high_seq_cnt = 0xFFFF; + resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; + resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; + } else { + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; + resp->payload.ba_rjt.reason_code = + BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; + /* Other bytes are zero */ + } + + ha->tgt.qla_tgt->abts_resp_expected++; + + qla2x00_start_iocbs(vha, vha->req); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire + */ +static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, + struct abts_resp_from_24xx_fw *entry) +{ + struct ctio7_to_24xx *ctio; + + ql_dbg(ql_dbg_tgt, vha, 0xe007, + "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); + /* Send marker if required */ + if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) + return; + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); + if (ctio == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe04b, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return; + } + + /* + * This is the firmware's response to the ABTS response that we + * generated, so its ID fields are reversed. 
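The CTIO built below + * therefore takes the initiator ID from the d_id field. 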
+ */ + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->nport_handle = entry->nport_handle; + ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; + ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; + ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; + ctio->exchange_addr = entry->exchange_addr_to_abort; + ctio->u.status1.flags = + __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_TERMINATE); + ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + + qla2x00_start_iocbs(vha, vha->req); + + qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry, + FCP_TMF_CMPL, true); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, + struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + int rc; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, + "qla_target(%d): task abort (tag=%d)\n", + vha->vp_idx, abts->exchange_addr_to_abort); + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (mcmd == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, + "qla_target(%d): %s: Allocation of ABORT cmd failed", + vha->vp_idx, __func__); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + + mcmd->sess = sess; + memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); + + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK, + abts->exchange_addr_to_abort); + if (rc != 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, + "qla_target(%d): tgt_ops->handle_tmr()" + " failed: %d", vha->vp_idx, rc); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -EFAULT; + } + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire + */ +static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, + struct abts_recv_from_24xx *abts) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + uint32_t tag = abts->exchange_addr_to_abort; + uint8_t s_id[3]; + int rc; + + if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, + "qla_target(%d): ABTS: Abort Sequence not " + "supported\n", vha->vp_idx); + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } + + if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, + "qla_target(%d): ABTS: Unknown Exchange " + "Address received\n", vha->vp_idx); + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, + "qla_target(%d): task abort (s_id=%x:%x:%x, " + "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], + abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, + le32_to_cpu(abts->fcp_hdr_le.parameter)); + + s_id[0] = abts->fcp_hdr_le.s_id[2]; + s_id[1] = abts->fcp_hdr_le.s_id[1]; + s_id[2] = abts->fcp_hdr_le.s_id[0]; + + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); + if (!sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, + "qla_target(%d): task abort for non-existent session\n", + vha->vp_idx); + rc = qlt_sched_sess_work(ha->tgt.qla_tgt, + QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); + if (rc != 0) { + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, + false); + } + return; + } + + rc = __qlt_24xx_handle_abts(vha, abts, sess); + if (rc != 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, + "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", + vha->vp_idx, rc); + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire + */ +static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, + struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) +{ + struct atio_from_isp *atio = &mcmd->orig_iocb.atio; + struct ctio7_to_24xx *ctio; + + ql_dbg(ql_dbg_tgt, ha, 0xe008, + "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", + ha, atio, resp_code); + + /* Send marker if required */ + if (qlt_issue_marker(ha, 1) != QLA_SUCCESS) + return; + + ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL); + if (ctio == NULL) { + ql_dbg(ql_dbg_tgt, ha, 0xe04c, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", ha->vp_idx, __func__); + return; + } + + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + ctio->nport_handle = mcmd->sess->loop_id; + ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = ha->vp_idx; + ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->exchange_addr = atio->u.isp24.exchange_addr; + ctio->u.status1.flags = (atio->u.isp24.attr << 9) | + __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_SEND_STATUS); + ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.scsi_status = + __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); + ctio->u.status1.response_len = __constant_cpu_to_le16(8); + ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code); + + qla2x00_start_iocbs(ha, ha->req); +} + +void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) +{ + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); +} +EXPORT_SYMBOL(qlt_free_mcmd); + +/* callback from target fabric module code */ +void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) +{ + struct scsi_qla_host *vha = mcmd->sess->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, + "TM response mcmd (%p) status %#x state %#x", + mcmd, mcmd->fc_tm_rsp, mcmd->flags); + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) + qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, + 0, 0, 0, 0, 0, 0); + else { + if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) + qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, + mcmd->fc_tm_rsp, false); + else + qlt_24xx_send_task_mgmt_ctio(vha, mcmd, + mcmd->fc_tm_rsp); + } + /* + * Make the callback for ->free_mcmd() to queue_work() and invoke + * target_put_sess_cmd() to drop cmd_kref to 1. The final + * target_put_sess_cmd() call will be made from TFO->check_stop_free() + * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd + * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> + * qlt_xmit_tm_rsp() returns here.. 
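Note that ->free_mcmd() only + * queues work, so calling it with hardware_lock held is safe. 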
*/ + ha->tgt.tgt_ops->free_mcmd(mcmd); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} +EXPORT_SYMBOL(qlt_xmit_tm_rsp); + +/* No locks */ +static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) +{ + struct qla_tgt_cmd *cmd = prm->cmd; + + BUG_ON(cmd->sg_cnt == 0); + + prm->sg = (struct scatterlist *)cmd->sg; + prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg, + cmd->sg_cnt, cmd->dma_data_direction); + if (unlikely(prm->seg_cnt == 0)) + goto out_err; + + prm->cmd->sg_mapped = 1; + + /* + * If greater than four sg entries then we need to allocate + * the continuation entries + */ + if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) + prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - + prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); + + ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", + prm->seg_cnt, prm->req_cnt); + return 0; + +out_err: + ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d, + "qla_target(%d): PCI mapping failed: sg_cnt=%d", + 0, prm->cmd->sg_cnt); + return -1; +} + +static inline void qlt_unmap_sg(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd) +{ + struct qla_hw_data *ha = vha->hw; + + BUG_ON(!cmd->sg_mapped); + pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); + cmd->sg_mapped = 0; +} + +static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, + uint32_t req_cnt) +{ + struct qla_hw_data *ha = vha->hw; + device_reg_t __iomem *reg = ha->iobase; + uint32_t cnt; + + if (vha->req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out); + + ql_dbg(ql_dbg_tgt, vha, 0xe00a, + "Request ring circled: cnt=%d, vha->req->ring_index=%d, " + "vha->req->cnt=%d, req_cnt=%d\n", cnt, + vha->req->ring_index, vha->req->cnt, req_cnt); + if (vha->req->ring_index < cnt) + vha->req->cnt = cnt - vha->req->ring_index; + else + vha->req->cnt = vha->req->length - + (vha->req->ring_index - cnt); + } + + if (unlikely(vha->req->cnt < (req_cnt + 2))) { + ql_dbg(ql_dbg_tgt, vha, 0xe00b, + "qla_target(%d): There is no room in the " + "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, " + "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index, + vha->req->cnt, req_cnt); + return -EAGAIN; + } + vha->req->cnt -= req_cnt; + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire + */ +static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) +{ + /* Adjust ring index. 
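Wrap to the start of the ring once the end is reached. 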
*/ + vha->req->ring_index++; + if (vha->req->ring_index == vha->req->length) { + vha->req->ring_index = 0; + vha->req->ring_ptr = vha->req->ring; + } else { + vha->req->ring_ptr++; + } + return (cont_entry_t *)vha->req->ring_ptr; +} + +/* ha->hardware_lock supposed to be held on entry */ +static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t h; + + h = ha->tgt.current_handle; + /* always increment cmd handle */ + do { + ++h; + if (h > MAX_OUTSTANDING_COMMANDS) + h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ + if (h == ha->tgt.current_handle) { + ql_dbg(ql_dbg_tgt, vha, 0xe04e, + "qla_target(%d): Ran out of " + "empty cmd slots in ha %p\n", vha->vp_idx, ha); + h = QLA_TGT_NULL_HANDLE; + break; + } + } while ((h == QLA_TGT_NULL_HANDLE) || + (h == QLA_TGT_SKIP_HANDLE) || + (ha->tgt.cmds[h-1] != NULL)); + + if (h != QLA_TGT_NULL_HANDLE) + ha->tgt.current_handle = h; + + return h; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, + struct scsi_qla_host *vha) +{ + uint32_t h; + struct ctio7_to_24xx *pkt; + struct qla_hw_data *ha = vha->hw; + struct atio_from_isp *atio = &prm->cmd->atio; + + pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; + prm->pkt = pkt; + memset(pkt, 0, sizeof(*pkt)); + + pkt->entry_type = CTIO_TYPE7; + pkt->entry_count = (uint8_t)prm->req_cnt; + pkt->vp_index = vha->vp_idx; + + h = qlt_make_handle(vha); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else + ha->tgt.cmds[h-1] = prm->cmd; + + pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = prm->cmd->loop_id; + pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + pkt->exchange_addr = atio->u.isp24.exchange_addr; + pkt->u.status0.flags |= (atio->u.isp24.attr << 9); + pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); + + ql_dbg(ql_dbg_tgt, vha, 0xe00c, + "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", + vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, + le16_to_cpu(pkt->u.status0.ox_id)); + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. We have already made sure + * that there is sufficient amount of request entries to not drop it. + */ +static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, + struct scsi_qla_host *vha) +{ + int cnt; + uint32_t *dword_ptr; + int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; + + /* Build continuation packets */ + while (prm->seg_cnt > 0) { + cont_a64_entry_t *cont_pkt64 = + (cont_a64_entry_t *)qlt_get_req_pkt(vha); + + /* + * Make sure that from cont_pkt64 none of + * 64-bit specific fields used for 32-bit + * addressing. Cast to (cont_entry_t *) for + * that. 
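Both layouts share the + * same entry_type/entry_count header, so the cast is safe. 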
+ */ + + memset(cont_pkt64, 0, sizeof(*cont_pkt64)); + + cont_pkt64->entry_count = 1; + cont_pkt64->sys_define = 0; + + if (enable_64bit_addressing) { + cont_pkt64->entry_type = CONTINUE_A64_TYPE; + dword_ptr = + (uint32_t *)&cont_pkt64->dseg_0_address; + } else { + cont_pkt64->entry_type = CONTINUE_TYPE; + dword_ptr = + (uint32_t *)&((cont_entry_t *) + cont_pkt64)->dseg_0_address; + } + + /* Load continuation entry data segments */ + for (cnt = 0; + cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt; + cnt++, prm->seg_cnt--) { + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32 + (sg_dma_address(prm->sg))); + if (enable_64bit_addressing) { + *dword_ptr++ = + cpu_to_le32(pci_dma_hi32 + (sg_dma_address + (prm->sg))); + } + *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); + + ql_dbg(ql_dbg_tgt, vha, 0xe00d, + "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n", + (long long unsigned int) + pci_dma_hi32(sg_dma_address(prm->sg)), + (long long unsigned int) + pci_dma_lo32(sg_dma_address(prm->sg)), + (int)sg_dma_len(prm->sg)); + + prm->sg = sg_next(prm->sg); + } + } +} + +/* + * ha->hardware_lock supposed to be held on entry. We have already made sure + * that there is sufficient amount of request entries to not drop it. + */ +static void qlt_load_data_segments(struct qla_tgt_prm *prm, + struct scsi_qla_host *vha) +{ + int cnt; + uint32_t *dword_ptr; + int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; + struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; + + ql_dbg(ql_dbg_tgt, vha, 0xe00e, + "iocb->scsi_status=%x, iocb->flags=%x\n", + le16_to_cpu(pkt24->u.status0.scsi_status), + le16_to_cpu(pkt24->u.status0.flags)); + + pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); + + /* Setup packet address segment pointer */ + dword_ptr = pkt24->u.status0.dseg_0_address; + + /* Set total data segment count */ + if (prm->seg_cnt) + pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); + + if (prm->seg_cnt == 0) { + /* No data transfer */ + *dword_ptr++ = 0; + *dword_ptr = 0; + return; + } + + /* If scatter gather */ + ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments..."); + + /* Load command entry data segments */ + for (cnt = 0; + (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt; + cnt++, prm->seg_cnt--) { + *dword_ptr++ = + cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); + if (enable_64bit_addressing) { + *dword_ptr++ = + cpu_to_le32(pci_dma_hi32( + sg_dma_address(prm->sg))); + } + *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); + + ql_dbg(ql_dbg_tgt, vha, 0xe010, + "S/G Segment phys_addr=%llx:%llx, len=%d\n", + (long long unsigned int)pci_dma_hi32(sg_dma_address( + prm->sg)), + (long long unsigned int)pci_dma_lo32(sg_dma_address( + prm->sg)), + (int)sg_dma_len(prm->sg)); + + prm->sg = sg_next(prm->sg); + } + + qlt_load_cont_data_segments(prm, vha); +} + +static inline int qlt_has_data(struct qla_tgt_cmd *cmd) +{ + return cmd->bufflen > 0; +} + +/* + * Called without ha->hardware_lock held + */ +static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, + struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, + uint32_t *full_req_cnt) +{ + struct qla_tgt *tgt = cmd->tgt; + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = vha->hw; + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (unlikely(cmd->aborted)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, + "qla_target(%d): terminating exchange " + "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd, + se_cmd, cmd->tag); + + cmd->state = QLA_TGT_STATE_ABORTED; + + qlt_send_term_exchange(vha, 
cmd, &cmd->atio, 0); + + /* !! At this point cmd could be already freed !! */ + return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; + } + + ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", + vha->vp_idx, cmd->tag); + + prm->cmd = cmd; + prm->tgt = tgt; + prm->rq_result = scsi_status; + prm->sense_buffer = &cmd->sense_buffer[0]; + prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; + prm->sg = NULL; + prm->seg_cnt = -1; + prm->req_cnt = 1; + prm->add_status_pkt = 0; + + ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n", + prm->rq_result, xmit_type); + + /* Send marker if required */ + if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) + return -EFAULT; + + ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx); + + if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { + if (qlt_pci_map_calc_cnt(prm) != 0) + return -EAGAIN; + } + + *full_req_cnt = prm->req_cnt; + + if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + prm->residual = se_cmd->residual_count; + ql_dbg(ql_dbg_tgt, vha, 0xe014, + "Residual underflow: %d (tag %d, " + "op %x, bufflen %d, rq_result %x)\n", prm->residual, + cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->bufflen, prm->rq_result); + prm->rq_result |= SS_RESIDUAL_UNDER; + } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + prm->residual = se_cmd->residual_count; + ql_dbg(ql_dbg_tgt, vha, 0xe015, + "Residual overflow: %d (tag %d, " + "op %x, bufflen %d, rq_result %x)\n", prm->residual, + cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->bufflen, prm->rq_result); + prm->rq_result |= SS_RESIDUAL_OVER; + } + + if (xmit_type & QLA_TGT_XMIT_STATUS) { + /* + * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be + * ignored in *xmit_response() below + */ + if (qlt_has_data(cmd)) { + if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || + (IS_FWI2_CAPABLE(ha) && + (prm->rq_result != 0))) { + prm->add_status_pkt = 1; + (*full_req_cnt)++; + } + } + } + + ql_dbg(ql_dbg_tgt, vha, 0xe016, + "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n", + prm->req_cnt, *full_req_cnt, prm->add_status_pkt); + + return 0; +} + +static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, + struct qla_tgt_cmd *cmd, int sending_sense) +{ + if (ha->tgt.enable_class_2) + return 0; + + if (sending_sense) + return cmd->conf_compl_supported; + else + return ha->tgt.enable_explicit_conf && + cmd->conf_compl_supported; +} + +#ifdef CONFIG_QLA_TGT_DEBUG_SRR +/* + * Original taken from the XFS code + */ +static unsigned long qlt_srr_random(void) +{ + static int Inited; + static unsigned long RandomValue; + static DEFINE_SPINLOCK(lock); + /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ + register long rv; + register long lo; + register long hi; + unsigned long flags; + + spin_lock_irqsave(&lock, flags); + if (!Inited) { + RandomValue = jiffies; + Inited = 1; + } + rv = RandomValue; + hi = rv / 127773; + lo = rv % 127773; + rv = 16807 * lo - 2836 * hi; + if (rv <= 0) + rv += 2147483647; + RandomValue = rv; + spin_unlock_irqrestore(&lock, flags); + return rv; +} + +static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) +{ +#if 0 /* This is not a real status packets lost, so it won't lead to SRR */ + if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) + == 50) { + *xmit_type &= ~QLA_TGT_XMIT_STATUS; + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, + "Dropping cmd %p (tag %d) status", cmd, cmd->tag); + } +#endif + /* + * It's currently not possible to simulate SRRs for FCP_WRITE without + * a physical link 
layer failure, so don't even try here. + */ + if (cmd->dma_data_direction != DMA_FROM_DEVICE) + return; + + if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && + ((qlt_srr_random() % 100) == 20)) { + int i, leave = 0; + unsigned int tot_len = 0; + + while (leave == 0) + leave = qlt_srr_random() % cmd->sg_cnt; + + for (i = 0; i < leave; i++) + tot_len += cmd->sg[i].length; + + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, + "Cutting cmd %p (tag %d) buffer" + " tail to len %d, sg_cnt %d (cmd->bufflen %d," + " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave, + cmd->bufflen, cmd->sg_cnt); + + cmd->bufflen = tot_len; + cmd->sg_cnt = leave; + } + + if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { + unsigned int offset = qlt_srr_random() % cmd->bufflen; + + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, + "Cutting cmd %p (tag %d) buffer head " + "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset, + cmd->bufflen); + if (offset == 0) + *xmit_type &= ~QLA_TGT_XMIT_DATA; + else if (qlt_set_data_offset(cmd, offset)) { + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, + "qlt_set_data_offset() failed (tag %d)", cmd->tag); + } + } +} +#else +static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) +{} +#endif + +static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, + struct qla_tgt_prm *prm) +{ + prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, + (uint32_t)sizeof(ctio->u.status1.sense_data)); + ctio->u.status0.flags |= + __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); + if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { + ctio->u.status0.flags |= __constant_cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } + ctio->u.status0.residual = cpu_to_le32(prm->residual); + ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); + if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { + int i; + + if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { + if (prm->cmd->se_cmd.scsi_status != 0) { + ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, + "Skipping EXPLICIT_CONFORM and " + "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " + "non GOOD status\n"); + goto skip_explicit_conf; + } + ctio->u.status1.flags |= __constant_cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } +skip_explicit_conf: + ctio->u.status1.flags &= + ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ctio->u.status1.flags |= + __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + ctio->u.status1.scsi_status |= + __constant_cpu_to_le16(SS_SENSE_LEN_VALID); + ctio->u.status1.sense_length = + cpu_to_le16(prm->sense_buffer_len); + for (i = 0; i < prm->sense_buffer_len/4; i++) + ((uint32_t *)ctio->u.status1.sense_data)[i] = + cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); +#if 0 + if (unlikely((prm->sense_buffer_len % 4) != 0)) { + static int q; + if (q < 10) { + ql_dbg(ql_dbg_tgt, vha, 0xe04f, + "qla_target(%d): %d bytes of sense " + "lost", prm->tgt->ha->vp_idx, + prm->sense_buffer_len % 4); + q++; + } + } +#endif + } else { + ctio->u.status1.flags &= + ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ctio->u.status1.flags |= + __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + ctio->u.status1.sense_length = 0; + memset(ctio->u.status1.sense_data, 0, + sizeof(ctio->u.status1.sense_data)); + } + + /* Sense with len > 24: is it possible? 
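The min_t() clamp above truncates + * anything longer to sizeof(ctio->u.status1.sense_data). 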
*/ +} + +/* + * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * + * QLA_TGT_XMIT_STATUS for >= 24xx silicon + */ +int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, + uint8_t scsi_status) +{ + struct scsi_qla_host *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + struct ctio7_to_24xx *pkt; + struct qla_tgt_prm prm; + uint32_t full_req_cnt = 0; + unsigned long flags = 0; + int res; + + memset(&prm, 0, sizeof(prm)); + qlt_check_srr_debug(cmd, &xmit_type); + + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " + "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? + 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); + + res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, + &full_req_cnt); + if (unlikely(res != 0)) { + if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) + return 0; + + return res; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Does F/W have an IOCBs for this request */ + res = qlt_check_reserve_free_req(vha, full_req_cnt); + if (unlikely(res)) + goto out_unmap_unlock; + + res = qlt_24xx_build_ctio_pkt(&prm, vha); + if (unlikely(res != 0)) + goto out_unmap_unlock; + + + pkt = (struct ctio7_to_24xx *)prm.pkt; + + if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { + pkt->u.status0.flags |= + __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | + CTIO7_FLAGS_STATUS_MODE_0); + + qlt_load_data_segments(&prm, vha); + + if (prm.add_status_pkt == 0) { + if (xmit_type & QLA_TGT_XMIT_STATUS) { + pkt->u.status0.scsi_status = + cpu_to_le16(prm.rq_result); + pkt->u.status0.residual = + cpu_to_le32(prm.residual); + pkt->u.status0.flags |= __constant_cpu_to_le16( + CTIO7_FLAGS_SEND_STATUS); + if (qlt_need_explicit_conf(ha, cmd, 0)) { + pkt->u.status0.flags |= + __constant_cpu_to_le16( + CTIO7_FLAGS_EXPLICIT_CONFORM | + CTIO7_FLAGS_CONFORM_REQ); + } + } + + } else { + /* + * We have already made sure that there is sufficient + * amount of request entries to not drop HW lock in + * req_pkt(). 
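The extra status packet was + * already counted in full_req_cnt by qlt_pre_xmit_response(). 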
+ */ + struct ctio7_to_24xx *ctio = + (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); + + ql_dbg(ql_dbg_tgt, vha, 0xe019, + "Building additional status packet\n"); + + memcpy(ctio, pkt, sizeof(*ctio)); + ctio->entry_count = 1; + ctio->dseg_count = 0; + ctio->u.status1.flags &= ~__constant_cpu_to_le16( + CTIO7_FLAGS_DATA_IN); + + /* Real finish is ctio_m1's finish */ + pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; + pkt->u.status0.flags |= __constant_cpu_to_le16( + CTIO7_FLAGS_DONT_RET_CTIO); + qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, + &prm); + pr_debug("Status CTIO7: %p\n", ctio); + } + } else + qlt_24xx_init_ctio_to_isp(pkt, &prm); + + + cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ + + ql_dbg(ql_dbg_tgt, vha, 0xe01a, + "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", + pkt, scsi_status); + + qla2x00_start_iocbs(vha, vha->req); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return 0; + +out_unmap_unlock: + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return res; +} +EXPORT_SYMBOL(qlt_xmit_response); + +int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) +{ + struct ctio7_to_24xx *pkt; + struct scsi_qla_host *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = cmd->tgt; + struct qla_tgt_prm prm; + unsigned long flags; + int res = 0; + + memset(&prm, 0, sizeof(prm)); + prm.cmd = cmd; + prm.tgt = tgt; + prm.sg = NULL; + prm.req_cnt = 1; + + /* Send marker if required */ + if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) + return -EIO; + + ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", + (int)vha->vp_idx); + + /* Calculate number of entries and segments required */ + if (qlt_pci_map_calc_cnt(&prm) != 0) + return -EAGAIN; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Does F/W have an IOCBs for this request */ + res = qlt_check_reserve_free_req(vha, prm.req_cnt); + if (res != 0) + goto out_unlock_free_unmap; + + res = qlt_24xx_build_ctio_pkt(&prm, vha); + if (unlikely(res != 0)) + goto out_unlock_free_unmap; + pkt = (struct ctio7_to_24xx *)prm.pkt; + pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | + CTIO7_FLAGS_STATUS_MODE_0); + qlt_load_data_segments(&prm, vha); + + cmd->state = QLA_TGT_STATE_NEED_DATA; + + qla2x00_start_iocbs(vha, vha->req); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return res; + +out_unlock_free_unmap: + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return res; +} +EXPORT_SYMBOL(qlt_rdy_to_xfer); + +/* If hardware_lock held on entry, might drop it, then reaquire */ +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +static int __qlt_send_term_exchange(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd, + struct atio_from_isp *atio) +{ + struct ctio7_to_24xx *ctio24; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret = 0; + + ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); + + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe050, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + if (cmd != NULL) { + if (cmd->state < QLA_TGT_STATE_PROCESSED) { + ql_dbg(ql_dbg_tgt, vha, 0xe051, + "qla_target(%d): Terminating cmd %p with " + "incorrect state %d\n", vha->vp_idx, cmd, + cmd->state); + } else + ret = 1; + } + + pkt->entry_count = 1; + pkt->handle = 
QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; + ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; + ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio24->exchange_addr = atio->u.isp24.exchange_addr; + ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | + __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_TERMINATE); + ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + + /* Most likely, it isn't needed */ + ctio24->u.status1.residual = get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[ + atio->u.isp24.fcp_cmnd.add_cdb_len]); + if (ctio24->u.status1.residual != 0) + ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; + + qla2x00_start_iocbs(vha, vha->req); + return ret; +} + +static void qlt_send_term_exchange(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) +{ + unsigned long flags; + int rc; + + if (qlt_issue_marker(vha, ha_locked) < 0) + return; + + if (ha_locked) { + rc = __qlt_send_term_exchange(vha, cmd, atio); + goto done; + } + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + rc = __qlt_send_term_exchange(vha, cmd, atio); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +done: + if (rc == 1) { + if (!ha_locked && !in_interrupt()) + msleep(250); /* just in case */ + + vha->hw->tgt.tgt_ops->free_cmd(cmd); + } +} + +void qlt_free_cmd(struct qla_tgt_cmd *cmd) +{ + BUG_ON(cmd->sg_mapped); + + if (unlikely(cmd->free_sg)) + kfree(cmd->sg); + kmem_cache_free(qla_tgt_cmd_cachep, cmd); +} +EXPORT_SYMBOL(qlt_free_cmd); + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd, void *ctio) +{ + struct qla_tgt_srr_ctio *sc; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_srr_imm *imm; + + tgt->ctio_srr_id++; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, + "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); + + if (!ctio) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, + "qla_target(%d): SRR CTIO, but ctio is NULL\n", + vha->vp_idx); + return -EINVAL; + } + + sc = kzalloc(sizeof(*sc), GFP_ATOMIC); + if (sc != NULL) { + sc->cmd = cmd; + /* IRQ is already OFF */ + spin_lock(&tgt->srr_lock); + sc->srr_id = tgt->ctio_srr_id; + list_add_tail(&sc->srr_list_entry, + &tgt->srr_ctio_list); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, + "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); + if (tgt->imm_srr_id == tgt->ctio_srr_id) { + int found = 0; + list_for_each_entry(imm, &tgt->srr_imm_list, + srr_list_entry) { + if (imm->srr_id == sc->srr_id) { + found = 1; + break; + } + } + if (found) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, + "Scheduling srr work\n"); + schedule_work(&tgt->srr_work); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, + "qla_target(%d): imm_srr_id " + "== ctio_srr_id (%d), but there is no " + "corresponding SRR IMM, deleting CTIO " + "SRR %p\n", vha->vp_idx, + tgt->ctio_srr_id, sc); + list_del(&sc->srr_list_entry); + spin_unlock(&tgt->srr_lock); + + kfree(sc); + return -EINVAL; + } + } + spin_unlock(&tgt->srr_lock); + } else { + struct qla_tgt_srr_imm *ti; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, + "qla_target(%d): Unable to allocate SRR CTIO 
entry\n", + vha->vp_idx); + spin_lock(&tgt->srr_lock); + list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, + srr_list_entry) { + if (imm->srr_id == tgt->ctio_srr_id) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, + "IMM SRR %p deleted (id %d)\n", + imm, imm->srr_id); + list_del(&imm->srr_list_entry); + qlt_reject_free_srr_imm(vha, imm, 1); + } + } + spin_unlock(&tgt->srr_lock); + + return -ENOMEM; + } + + return 0; +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + */ +static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, + struct qla_tgt_cmd *cmd, uint32_t status) +{ + int term = 0; + + if (ctio != NULL) { + struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; + term = !(c->flags & + __constant_cpu_to_le16(OF_TERM_EXCH)); + } else + term = 1; + + if (term) + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + + return term; +} + +/* ha->hardware_lock supposed to be held on entry */ +static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, + uint32_t handle) +{ + struct qla_hw_data *ha = vha->hw; + + handle--; + if (ha->tgt.cmds[handle] != NULL) { + struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; + ha->tgt.cmds[handle] = NULL; + return cmd; + } else + return NULL; +} + +/* ha->hardware_lock supposed to be held on entry */ +static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, + uint32_t handle, void *ctio) +{ + struct qla_tgt_cmd *cmd = NULL; + + /* Clear out internal marks */ + handle &= ~(CTIO_COMPLETION_HANDLE_MARK | + CTIO_INTERMEDIATE_HANDLE_MARK); + + if (handle != QLA_TGT_NULL_HANDLE) { + if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { + ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", + "SKIP_HANDLE CTIO\n"); + return NULL; + } + /* handle-1 is actually used */ + if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) { + ql_dbg(ql_dbg_tgt, vha, 0xe052, + "qla_target(%d): Wrong handle %x received\n", + vha->vp_idx, handle); + return NULL; + } + cmd = qlt_get_cmd(vha, handle); + if (unlikely(cmd == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0xe053, + "qla_target(%d): Suspicious: unable to " + "find the command with handle %x\n", vha->vp_idx, + handle); + return NULL; + } + } else if (ctio != NULL) { + /* We can't get loop ID from CTIO7 */ + ql_dbg(ql_dbg_tgt, vha, 0xe054, + "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " + "support NULL handles\n", vha->vp_idx); + return NULL; + } + + return cmd; +} + +/* + * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire + */ +static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, + uint32_t status, void *ctio) +{ + struct qla_hw_data *ha = vha->hw; + struct se_cmd *se_cmd; + struct target_core_fabric_ops *tfo; + struct qla_tgt_cmd *cmd; + + ql_dbg(ql_dbg_tgt, vha, 0xe01e, + "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", + vha->vp_idx, ctio, status, handle); + + if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { + /* That could happen only in case of an error/reset/abort */ + if (status != CTIO_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, + "Intermediate CTIO received" + " (status %x)\n", status); + } + return; + } + + cmd = qlt_ctio_to_cmd(vha, handle, ctio); + if (cmd == NULL) { + if (status != CTIO_SUCCESS) + qlt_term_ctio_exchange(vha, ctio, NULL, status); + return; + } + se_cmd = &cmd->se_cmd; + tfo = se_cmd->se_tfo; + + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + + if (unlikely(status != CTIO_SUCCESS)) { + switch (status & 0xFFFF) { + case CTIO_LIP_RESET: + case CTIO_TARGET_RESET: + case CTIO_ABORTED: + case CTIO_TIMEOUT: + case CTIO_INVALID_RX_ID: + /* They are OK */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, + "qla_target(%d): CTIO with " + "status %#x received, state %x, se_cmd %p, " + "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " + "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, + status, cmd->state, se_cmd); + break; + + case CTIO_PORT_LOGGED_OUT: + case CTIO_PORT_UNAVAILABLE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, + "qla_target(%d): CTIO with PORT LOGGED " + "OUT (29) or PORT UNAVAILABLE (28) status %x " + "received (state %x, se_cmd %p)\n", vha->vp_idx, + status, cmd->state, se_cmd); + break; + + case CTIO_SRR_RECEIVED: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, + "qla_target(%d): CTIO with SRR_RECEIVED" + " status %x received (state %x, se_cmd %p)\n", + vha->vp_idx, status, cmd->state, se_cmd); + if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) + break; + else + return; + + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, + "qla_target(%d): CTIO with error status " + "0x%x received (state %x, se_cmd %p\n", + vha->vp_idx, status, cmd->state, se_cmd); + break; + } + + if (cmd->state != QLA_TGT_STATE_NEED_DATA) + if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) + return; + } + + if (cmd->state == QLA_TGT_STATE_PROCESSED) { + ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); + } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + int rx_status = 0; + + cmd->state = QLA_TGT_STATE_DATA_IN; + + if (unlikely(status != CTIO_SUCCESS)) + rx_status = -EIO; + else + cmd->write_data_transferred = 1; + + ql_dbg(ql_dbg_tgt, vha, 0xe020, + "Data received, context %x, rx_status %d\n", + 0x0, rx_status); + + ha->tgt.tgt_ops->handle_data(cmd); + return; + } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, + "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, + "qla_target(%d): A command in state (%d) should " + "not return a CTIO complete\n", vha->vp_idx, cmd->state); + } + + if (unlikely(status != CTIO_SUCCESS)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); + dump_stack(); + } + + ha->tgt.tgt_ops->free_cmd(cmd); +} + +/* ha->hardware_lock supposed to be held on entry */ +/* called via callback from qla2xxx */ +void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + + if (likely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0xe021, + "CTIO, but 
target mode not enabled" + " (ha %d %p handle %#x)", vha->vp_idx, ha, handle); + return; + } + + tgt->irq_cmd_count++; + qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL); + tgt->irq_cmd_count--; +} + +static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, + uint8_t task_codes) +{ + int fcp_task_attr; + + switch (task_codes) { + case ATIO_SIMPLE_QUEUE: + fcp_task_attr = MSG_SIMPLE_TAG; + break; + case ATIO_HEAD_OF_QUEUE: + fcp_task_attr = MSG_HEAD_TAG; + break; + case ATIO_ORDERED_QUEUE: + fcp_task_attr = MSG_ORDERED_TAG; + break; + case ATIO_ACA_QUEUE: + fcp_task_attr = MSG_ACA_TAG; + break; + case ATIO_UNTAGGED: + fcp_task_attr = MSG_SIMPLE_TAG; + break; + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, + "qla_target: unknown task code %x, use ORDERED instead\n", + task_codes); + fcp_task_attr = MSG_ORDERED_TAG; + break; + } + + return fcp_task_attr; +} + +static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, + uint8_t *); +/* + * Process context for I/O path into tcm_qla2xxx code + */ +static void qlt_do_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + scsi_qla_host_t *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_sess *sess = NULL; + struct atio_from_isp *atio = &cmd->atio; + unsigned char *cdb; + unsigned long flags; + uint32_t data_length; + int ret, fcp_task_attr, data_dir, bidi = 0; + + if (tgt->tgt_stop) + goto out_term; + + spin_lock_irqsave(&ha->hardware_lock, flags); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, + atio->u.isp24.fcp_hdr.s_id); + if (sess) { + if (unlikely(sess->tearing_down)) { + sess = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + goto out_term; + } else { + /* + * Do the extra kref_get() before dropping + * qla_hw_data->hardware_lock. + */ + kref_get(&sess->se_sess->sess_kref); + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (unlikely(!sess)) { + uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); + + if (atio->u.raw.entry_count > 1) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, + "Dropping multi-entry cmd %p\n", cmd); + goto out_term; + } + + mutex_lock(&ha->tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has an extra creation ref. 
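It is dropped by put_sess() at + * the end of this function. 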
*/ + mutex_unlock(&ha->tgt.tgt_mutex); + + if (!sess) + goto out_term; + } + + cmd->sess = sess; + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; + cmd->tag = atio->u.isp24.exchange_addr; + cmd->unpacked_lun = scsilun_to_int( + (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); + + if (atio->u.isp24.fcp_cmnd.rddata && + atio->u.isp24.fcp_cmnd.wrdata) { + bidi = 1; + data_dir = DMA_TO_DEVICE; + } else if (atio->u.isp24.fcp_cmnd.rddata) + data_dir = DMA_FROM_DEVICE; + else if (atio->u.isp24.fcp_cmnd.wrdata) + data_dir = DMA_TO_DEVICE; + else + data_dir = DMA_NONE; + + fcp_task_attr = qlt_get_fcp_task_attr(vha, + atio->u.isp24.fcp_cmnd.task_attr); + data_length = be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[ + atio->u.isp24.fcp_cmnd.add_cdb_len])); + + ql_dbg(ql_dbg_tgt, vha, 0xe022, + "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", + cmd, cmd->unpacked_lun, cmd->tag); + + ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, + fcp_task_attr, data_dir, bidi); + if (ret != 0) + goto out_term; + /* + * Drop the extra session reference taken in qlt_handle_cmd_for_atio() + */ + ha->tgt.tgt_ops->put_sess(sess); + return; + +out_term: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); + /* + * cmd has not been sent to the target yet, so pass NULL as the second argument + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, + struct atio_from_isp *atio) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_cmd *cmd; + + if (unlikely(tgt->tgt_stop)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, + "New command while device %p is shutting down\n", tgt); + return -EFAULT; + } + + cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); + if (!cmd) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, + "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); + return -ENOMEM; + } + + INIT_LIST_HEAD(&cmd->cmd_list); + + memcpy(&cmd->atio, atio, sizeof(*atio)); + cmd->state = QLA_TGT_STATE_NEW; + cmd->tgt = ha->tgt.qla_tgt; + cmd->vha = vha; + + INIT_WORK(&cmd->work, qlt_do_work); + queue_work(qla_tgt_wq, &cmd->work); + return 0; + +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, + int fn, void *iocb, int flags) +{ + struct scsi_qla_host *vha = sess->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + int res; + uint8_t tmr_func; + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (!mcmd) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, + "qla_target(%d): Allocation of management " + "command failed, some commands and their data could " + "leak\n", vha->vp_idx); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + mcmd->sess = sess; + + if (iocb) { + memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, + sizeof(mcmd->orig_iocb.imm_ntfy)); + } + mcmd->tmr_func = fn; + mcmd->flags = flags; + + switch (fn) { + case QLA_TGT_CLEAR_ACA: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, + "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); + tmr_func = TMR_CLEAR_ACA; + break; + + case QLA_TGT_TARGET_RESET: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, + "qla_target(%d): TARGET_RESET 
received\n", + sess->vha->vp_idx); + tmr_func = TMR_TARGET_WARM_RESET; + break; + + case QLA_TGT_LUN_RESET: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, + "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); + tmr_func = TMR_LUN_RESET; + break; + + case QLA_TGT_CLEAR_TS: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, + "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); + tmr_func = TMR_CLEAR_TASK_SET; + break; + + case QLA_TGT_ABORT_TS: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, + "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); + tmr_func = TMR_ABORT_TASK_SET; + break; +#if 0 + case QLA_TGT_ABORT_ALL: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, + "qla_target(%d): Doing ABORT_ALL_TASKS\n", + sess->vha->vp_idx); + tmr_func = 0; + break; + + case QLA_TGT_ABORT_ALL_SESS: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, + "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", + sess->vha->vp_idx); + tmr_func = 0; + break; + + case QLA_TGT_NEXUS_LOSS_SESS: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, + "qla_target(%d): Doing NEXUS_LOSS_SESS\n", + sess->vha->vp_idx); + tmr_func = 0; + break; + + case QLA_TGT_NEXUS_LOSS: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, + "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); + tmr_func = 0; + break; +#endif + default: + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, + "qla_target(%d): Unknown task mgmt fn 0x%x\n", + sess->vha->vp_idx, fn); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -ENOSYS; + } + + res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); + if (res != 0) { + ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, + "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", + sess->vha->vp_idx, res); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -EFAULT; + } + + return 0; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) +{ + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt; + struct qla_tgt_sess *sess; + uint32_t lun, unpacked_lun; + int lun_size, fn; + + tgt = ha->tgt.qla_tgt; + + lun = a->u.isp24.fcp_cmnd.lun; + lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); + fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, + a->u.isp24.fcp_hdr.s_id); + unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + + if (!sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, + "qla_target(%d): task mgmt fn 0x%x for " + "non-existant session\n", vha->vp_idx, fn); + return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, + sizeof(struct atio_from_isp)); + } + + return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); +} + +/* ha->hardware_lock supposed to be held on entry */ +static int __qlt_abort_task(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) +{ + struct atio_from_isp *a = (struct atio_from_isp *)iocb; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_mgmt_cmd *mcmd; + uint32_t lun, unpacked_lun; + int rc; + + mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); + if (mcmd == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, + "qla_target(%d): %s: Allocation of ABORT cmd failed\n", + vha->vp_idx, __func__); + return -ENOMEM; + } + memset(mcmd, 0, sizeof(*mcmd)); + + mcmd->sess = sess; + memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, + sizeof(mcmd->orig_iocb.imm_ntfy)); + + lun = a->u.isp24.fcp_cmnd.lun; + unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, + 
le16_to_cpu(iocb->u.isp2x.seq_id)); + if (rc != 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, + "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", + vha->vp_idx, rc); + mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); + return -EFAULT; + } + + return 0; +} + +/* ha->hardware_lock supposed to be held on entry */ +static int qlt_abort_task(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + int loop_id; + + loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); + + sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + if (sess == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, + "qla_target(%d): task abort for unexisting " + "session\n", vha->vp_idx); + return qlt_sched_sess_work(ha->tgt.qla_tgt, + QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); + } + + return __qlt_abort_task(vha, iocb, sess); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + */ +static int qlt_24xx_handle_els(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_hw_data *ha = vha->hw; + int res = 0; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, + "qla_target(%d): Port ID: 0x%02x:%02x:%02x" + " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], + iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], + iocb->u.isp24.status_subcode); + + switch (iocb->u.isp24.status_subcode) { + case ELS_PLOGI: + case ELS_FLOGI: + case ELS_PRLI: + case ELS_LOGO: + case ELS_PRLO: + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + break; + case ELS_PDISC: + case ELS_ADISC: + { + struct qla_tgt *tgt = ha->tgt.qla_tgt; + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } + res = 1; /* send notify ack */ + break; + } + + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, + "qla_target(%d): Unsupported ELS command %x " + "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); + res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); + break; + } + + return res; +} + +static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) +{ + struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; + size_t first_offset = 0, rem_offset = offset, tmp = 0; + int i, sg_srr_cnt, bufflen = 0; + + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, + "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " + "cmd->sg_cnt: %u, direction: %d\n", + cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); + + /* + * FIXME: Reject non zero SRR relative offset until we can test + * this code properly. 
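+ * Until then, the unconditional return -1 below makes the scatterlist-splitting logic that follows unreachable, and callers treat the failure as a reason to reject the SRR.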
+ */ + pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); + return -1; + + if (!cmd->sg || !cmd->sg_cnt) { + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, + "Missing cmd->sg or zero cmd->sg_cnt in" + " qla_tgt_set_data_offset\n"); + return -EINVAL; + } + /* + * Walk the current cmd->sg list until we locate the new sg_srr_start + */ + for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, + "sg[%d]: %p page: %p, length: %d, offset: %d\n", + i, sg, sg_page(sg), sg->length, sg->offset); + + if ((sg->length + tmp) > offset) { + first_offset = rem_offset; + sg_srr_start = sg; + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, + "Found matching sg[%d], using %p as sg_srr_start, " + "and using first_offset: %zu\n", i, sg, + first_offset); + break; + } + tmp += sg->length; + rem_offset -= sg->length; + } + + if (!sg_srr_start) { + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, + "Unable to locate sg_srr_start for offset: %u\n", offset); + return -EINVAL; + } + sg_srr_cnt = (cmd->sg_cnt - i); + + sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); + if (!sg_srr) { + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, + "Unable to allocate sgp\n"); + return -ENOMEM; + } + sg_init_table(sg_srr, sg_srr_cnt); + sgp = &sg_srr[0]; + /* + * Walk the remaining list for sg_srr_start, mapping to the newly + * allocated sg_srr taking first_offset into account. + */ + for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { + if (first_offset) { + sg_set_page(sgp, sg_page(sg), + (sg->length - first_offset), first_offset); + first_offset = 0; + } else { + sg_set_page(sgp, sg_page(sg), sg->length, 0); + } + bufflen += sgp->length; + + sgp = sg_next(sgp); + if (!sgp) + break; + } + + cmd->sg = sg_srr; + cmd->sg_cnt = sg_srr_cnt; + cmd->bufflen = bufflen; + cmd->offset += offset; + cmd->free_sg = 1; + + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", + cmd->sg_cnt); + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", + cmd->bufflen); + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", + cmd->offset); + + if (cmd->sg_cnt < 0) + BUG(); + + if (cmd->bufflen < 0) + BUG(); + + return 0; +} + +static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, + uint32_t srr_rel_offs, int *xmit_type) +{ + int res = 0, rel_offs; + + rel_offs = srr_rel_offs - cmd->offset; + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", + srr_rel_offs, rel_offs); + + *xmit_type = QLA_TGT_XMIT_ALL; + + if (rel_offs < 0) { + ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, + "qla_target(%d): SRR rel_offs (%d) < 0", + cmd->vha->vp_idx, rel_offs); + res = -1; + } else if (rel_offs == cmd->bufflen) + *xmit_type = QLA_TGT_XMIT_STATUS; + else if (rel_offs > 0) + res = qlt_set_data_offset(cmd, rel_offs); + + return res; +} + +/* No locks, thread context */ +static void qlt_handle_srr(struct scsi_qla_host *vha, + struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) +{ + struct imm_ntfy_from_isp *ntfy = + (struct imm_ntfy_from_isp *)&imm->imm_ntfy; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_cmd *cmd = sctio->cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; + int xmit_type = 0, resp = 0; + uint32_t offset; + uint16_t srr_ui; + + offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); + srr_ui = ntfy->u.isp24.srr_ui; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", + cmd, srr_ui); + + switch (srr_ui) { + case SRR_IU_STATUS: + spin_lock_irqsave(&ha->hardware_lock, flags); + 
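/* A status-only SRR is accepted with a notify ack while hardware_lock is held; the status itself is then retransmitted via the qlt_xmit_response() call at the bottom of this function. */ + 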
qlt_send_notify_ack(vha, ntfy, + 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + xmit_type = QLA_TGT_XMIT_STATUS; + resp = 1; + break; + case SRR_IU_DATA_IN: + if (!cmd->sg || !cmd->sg_cnt) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, + "Unable to process SRR_IU_DATA_IN due to" + " missing cmd->sg, state: %d\n", cmd->state); + dump_stack(); + goto out_reject; + } + if (se_cmd->scsi_status != 0) { + ql_dbg(ql_dbg_tgt, vha, 0xe02a, + "Rejecting SRR_IU_DATA_IN with non GOOD " + "scsi_status\n"); + goto out_reject; + } + cmd->bufflen = se_cmd->data_length; + + if (qlt_has_data(cmd)) { + if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) + goto out_reject; + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_notify_ack(vha, ntfy, + 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + resp = 1; + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, + "qla_target(%d): SRR for in data for cmd " + "without them (tag %d, SCSI status %d), " + "reject", vha->vp_idx, cmd->tag, + cmd->se_cmd.scsi_status); + goto out_reject; + } + break; + case SRR_IU_DATA_OUT: + if (!cmd->sg || !cmd->sg_cnt) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, + "Unable to process SRR_IU_DATA_OUT due to" + " missing cmd->sg\n"); + dump_stack(); + goto out_reject; + } + if (se_cmd->scsi_status != 0) { + ql_dbg(ql_dbg_tgt, vha, 0xe02b, + "Rejecting SRR_IU_DATA_OUT" + " with non GOOD scsi_status\n"); + goto out_reject; + } + cmd->bufflen = se_cmd->data_length; + + if (qlt_has_data(cmd)) { + if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) + goto out_reject; + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_notify_ack(vha, ntfy, + 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (xmit_type & QLA_TGT_XMIT_DATA) + qlt_rdy_to_xfer(cmd); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, + "qla_target(%d): SRR for out data for cmd " + "without them (tag %d, SCSI status %d), " + "reject", vha->vp_idx, cmd->tag, + cmd->se_cmd.scsi_status); + goto out_reject; + } + break; + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, + "qla_target(%d): Unknown srr_ui value %x", + vha->vp_idx, srr_ui); + goto out_reject; + } + + /* Transmit response in case of status and data-in cases */ + if (resp) + qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); + + return; + +out_reject: + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_notify_ack(vha, ntfy, 0, 0, 0, + NOTIFY_ACK_SRR_FLAGS_REJECT, + NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, + NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); + if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + cmd->state = QLA_TGT_STATE_DATA_IN; + dump_stack(); + } else + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, + struct qla_tgt_srr_imm *imm, int ha_locked) +{ + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); + + qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, + NOTIFY_ACK_SRR_FLAGS_REJECT, + NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, + NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); + + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + kfree(imm); +} + +static void qlt_handle_srr_work(struct work_struct *work) +{ + struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); + struct scsi_qla_host *vha = tgt->vha; + 
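/* This work function pairs each queued CTIO SRR with the immediate-notify SRR carrying the same srr_id (under srr_lock) before handing the pair to qlt_handle_srr(). */ + 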
struct qla_tgt_srr_ctio *sctio; + unsigned long flags; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", + tgt); + +restart: + spin_lock_irqsave(&tgt->srr_lock, flags); + list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { + struct qla_tgt_srr_imm *imm, *i, *ti; + struct qla_tgt_cmd *cmd; + struct se_cmd *se_cmd; + + imm = NULL; + list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, + srr_list_entry) { + if (i->srr_id == sctio->srr_id) { + list_del(&i->srr_list_entry); + if (imm) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, + "qla_target(%d): There must be " + "only one IMM SRR per CTIO SRR " + "(IMM SRR %p, id %d, CTIO %p\n", + vha->vp_idx, i, i->srr_id, sctio); + qlt_reject_free_srr_imm(tgt->vha, i, 0); + } else + imm = i; + } + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, + "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, + sctio->srr_id); + + if (imm == NULL) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, + "Not found matching IMM for SRR CTIO (id %d)\n", + sctio->srr_id); + continue; + } else + list_del(&sctio->srr_list_entry); + + spin_unlock_irqrestore(&tgt->srr_lock, flags); + + cmd = sctio->cmd; + /* + * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow + * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() + * logic.. + */ + cmd->offset = 0; + if (cmd->free_sg) { + kfree(cmd->sg); + cmd->sg = NULL; + cmd->free_sg = 0; + } + se_cmd = &cmd->se_cmd; + + cmd->sg_cnt = se_cmd->t_data_nents; + cmd->sg = se_cmd->t_data_sg; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, + "SRR cmd %p (se_cmd %p, tag %d, op %x), " + "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, + se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); + + qlt_handle_srr(vha, sctio, imm); + + kfree(imm); + kfree(sctio); + goto restart; + } + spin_unlock_irqrestore(&tgt->srr_lock, flags); +} + +/* ha->hardware_lock supposed to be held on entry */ +static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_tgt_srr_imm *imm; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt_srr_ctio *sctio; + + tgt->imm_srr_id++; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", + vha->vp_idx); + + imm = kzalloc(sizeof(*imm), GFP_ATOMIC); + if (imm != NULL) { + memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); + + /* IRQ is already OFF */ + spin_lock(&tgt->srr_lock); + imm->srr_id = tgt->imm_srr_id; + list_add_tail(&imm->srr_list_entry, + &tgt->srr_imm_list); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, + "IMM NTFY SRR %p added (id %d, ui %x)\n", + imm, imm->srr_id, iocb->u.isp24.srr_ui); + if (tgt->imm_srr_id == tgt->ctio_srr_id) { + int found = 0; + list_for_each_entry(sctio, &tgt->srr_ctio_list, + srr_list_entry) { + if (sctio->srr_id == imm->srr_id) { + found = 1; + break; + } + } + if (found) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", + "Scheduling srr work\n"); + schedule_work(&tgt->srr_work); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, + "qla_target(%d): imm_srr_id " + "== ctio_srr_id (%d), but there is no " + "corresponding SRR CTIO, deleting IMM " + "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, + imm); + list_del(&imm->srr_list_entry); + + kfree(imm); + + spin_unlock(&tgt->srr_lock); + goto out_reject; + } + } + spin_unlock(&tgt->srr_lock); + } else { + struct qla_tgt_srr_ctio *ts; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, + "qla_target(%d): Unable to allocate SRR IMM " + "entry, SRR request will be rejected\n", vha->vp_idx); + + /* IRQ is already OFF */ + spin_lock(&tgt->srr_lock); + 
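/* The IMM SRR allocation failed above, so a CTIO SRR already queued under the current imm_srr_id can never be paired; terminate its exchange and drop it before rejecting the SRR below. */ + 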
list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, + srr_list_entry) { + if (sctio->srr_id == tgt->imm_srr_id) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, + "CTIO SRR %p deleted (id %d)\n", + sctio, sctio->srr_id); + list_del(&sctio->srr_list_entry); + qlt_send_term_exchange(vha, sctio->cmd, + &sctio->cmd->atio, 1); + kfree(sctio); + } + } + spin_unlock(&tgt->srr_lock); + goto out_reject; + } + + return; + +out_reject: + qlt_send_notify_ack(vha, iocb, 0, 0, 0, + NOTIFY_ACK_SRR_FLAGS_REJECT, + NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, + NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + */ +static void qlt_handle_imm_notify(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *iocb) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t add_flags = 0; + int send_notify_ack = 1; + uint16_t status; + + status = le16_to_cpu(iocb->u.isp2x.status); + switch (status) { + case IMM_NTFY_LIP_RESET: + { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, + "qla_target(%d): LIP reset (loop %#x), subcode %x\n", + vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + + if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) + send_notify_ack = 0; + break; + } + + case IMM_NTFY_LIP_LINK_REINIT: + { + struct qla_tgt *tgt = ha->tgt.qla_tgt; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, + "qla_target(%d): LINK REINIT (loop %#x, " + "subcode %x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + } + memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); + tgt->link_reinit_iocb_pending = 1; + /* + * QLogic requires to wait after LINK REINIT for possible + * PDISC or ADISC ELS commands + */ + send_notify_ack = 0; + break; + } + + case IMM_NTFY_PORT_LOGOUT: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, + "qla_target(%d): Port logout (loop " + "%#x, subcode %x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp24.nport_handle), + iocb->u.isp24.status_subcode); + + if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_GLBL_TPRLO: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, + "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); + if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_PORT_CONFIG: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, + "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, + status); + if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) + send_notify_ack = 0; + /* The sessions will be cleared in the callback, if needed */ + break; + + case IMM_NTFY_GLBL_LOGO: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, + "qla_target(%d): Link failure detected\n", + vha->vp_idx); + /* I_T nexus loss */ + if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_IOCB_OVERFLOW: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, + "qla_target(%d): Cannot provide requested " + "capability (IOCB overflowed the immediate notify " + "resource count)\n", vha->vp_idx); + break; + + case IMM_NTFY_ABORT_TASK: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, + "qla_target(%d): Abort Task (S %08x I %#x -> " + "L %#x)\n", vha->vp_idx, + le16_to_cpu(iocb->u.isp2x.seq_id), + GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), + le16_to_cpu(iocb->u.isp2x.lun)); + if 
(qlt_abort_task(vha, iocb) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_RESOURCE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, + "qla_target(%d): Out of resources, host %ld\n", + vha->vp_idx, vha->host_no); + break; + + case IMM_NTFY_MSG_RX: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, + "qla_target(%d): Immediate notify task %x\n", + vha->vp_idx, iocb->u.isp2x.task_flags); + if (qlt_handle_task_mgmt(vha, iocb) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_ELS: + if (qlt_24xx_handle_els(vha, iocb) == 0) + send_notify_ack = 0; + break; + + case IMM_NTFY_SRR: + qlt_prepare_srr_imm(vha, iocb); + send_notify_ack = 0; + break; + + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, + "qla_target(%d): Received unknown immediate " + "notify status %x\n", vha->vp_idx, status); + break; + } + + if (send_notify_ack) + qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire + * This function sends busy to ISP 2xxx or 24xx. + */ +static void qlt_send_busy(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status) +{ + struct ctio7_to_24xx *ctio24; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + struct qla_tgt_sess *sess = NULL; + + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, + atio->u.isp24.fcp_hdr.s_id); + if (!sess) { + qlt_send_term_exchange(vha, NULL, atio, 1); + return; + } + /* Sending marker isn't necessary, since we called from ISR */ + + pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); + if (!pkt) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, + "qla_target(%d): %s failed: unable to allocate " + "request packet", vha->vp_idx, __func__); + return; + } + + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; + ctio24->nport_handle = sess->loop_id; + ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio24->exchange_addr = atio->u.isp24.exchange_addr; + ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | + __constant_cpu_to_le16( + CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | + CTIO7_FLAGS_DONT_RET_CTIO); + /* + * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, + * if the explicit conformation is used. + */ + ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.scsi_status = cpu_to_le16(status); + ctio24->u.status1.residual = get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[ + atio->u.isp24.fcp_cmnd.add_cdb_len]); + if (ctio24->u.status1.residual != 0) + ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; + + qla2x00_start_iocbs(vha, vha->req); +} + +/* ha->hardware_lock supposed to be held on entry */ +/* called via callback from qla2xxx */ +static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, + struct atio_from_isp *atio) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + int rc; + + if (unlikely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, + "ATIO pkt, but no tgt (ha %p)", ha); + return; + } + ql_dbg(ql_dbg_tgt, vha, 0xe02c, + "qla_target(%d): ATIO pkt %p: type %02x count %02x", + vha->vp_idx, atio, atio->u.raw.entry_type, + atio->u.raw.entry_count); + /* + * In tgt_stop mode we also should allow all requests to pass. 
+ * Otherwise, some commands can get stuck. + */ + + tgt->irq_cmd_count++; + + switch (atio->u.raw.entry_type) { + case ATIO_TYPE7: + ql_dbg(ql_dbg_tgt, vha, 0xe02d, + "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " + "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", + vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, + atio->u.isp24.fcp_cmnd.rddata, + atio->u.isp24.fcp_cmnd.wrdata, + atio->u.isp24.fcp_cmnd.add_cdb_len, + be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[ + atio->u.isp24.fcp_cmnd.add_cdb_len])), + atio->u.isp24.fcp_hdr.s_id[0], + atio->u.isp24.fcp_hdr.s_id[1], + atio->u.isp24.fcp_hdr.s_id[2]); + + if (unlikely(atio->u.isp24.exchange_addr == + ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { + ql_dbg(ql_dbg_tgt, vha, 0xe058, + "qla_target(%d): ATIO_TYPE7 " + "received with UNKNOWN exchange address, " + "sending QUEUE_FULL\n", vha->vp_idx); + qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); + break; + } + if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) + rc = qlt_handle_cmd_for_atio(vha, atio); + else + rc = qlt_handle_task_mgmt(vha, atio); + if (unlikely(rc != 0)) { + if (rc == -ESRCH) { +#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ + qlt_send_busy(vha, atio, SAM_STAT_BUSY); +#else + qlt_send_term_exchange(vha, NULL, atio, 1); +#endif + } else { + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_tgt, vha, 0xe059, + "qla_target: Unable to send " + "command to target for req, " + "ignoring.\n"); + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe05a, + "qla_target(%d): Unable to send " + "command to target, sending BUSY " + "status.\n", vha->vp_idx); + qlt_send_busy(vha, atio, SAM_STAT_BUSY); + } + } + } + break; + + case IMMED_NOTIFY_TYPE: + { + if (unlikely(atio->u.isp2x.entry_status != 0)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05b, + "qla_target(%d): Received ATIO packet %x " + "with error status %x\n", vha->vp_idx, + atio->u.raw.entry_type, + atio->u.isp2x.entry_status); + break; + } + ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); + qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); + break; + } + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe05c, + "qla_target(%d): Received unknown ATIO packet " + "type %x\n", vha->vp_idx, atio->u.raw.entry_type); + break; + } + + tgt->irq_cmd_count--; +} + +/* ha->hardware_lock supposed to be held on entry */ +/* called via callback from qla2xxx */ +static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + + if (unlikely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05d, + "qla_target(%d): Response pkt %x received, but no " + "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); + return; + } + + ql_dbg(ql_dbg_tgt, vha, 0xe02f, + "qla_target(%d): response pkt %p: T %02x C %02x S %02x " + "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, + pkt->entry_count, pkt->entry_status, pkt->handle); + + /* + * In tgt_stop mode we also should allow all requests to pass. + * Otherwise, some commands can get stuck. + 
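* Note: the irq_cmd_count increment below brackets IRQ-context processing, presumably so the target-stop path can wait for in-flight callbacks to drain before teardown. + 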
+ */ + + tgt->irq_cmd_count++; + + switch (pkt->entry_type) { + case CTIO_TYPE7: + { + struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; + ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", + vha->vp_idx); + qlt_do_ctio_completion(vha, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case ACCEPT_TGT_IO_TYPE: + { + struct atio_from_isp *atio = (struct atio_from_isp *)pkt; + int rc; + ql_dbg(ql_dbg_tgt, vha, 0xe031, + "ACCEPT_TGT_IO instance %d status %04x " + "lun %04x read/write %d data_length %04x " + "target_id %02x rx_id %04x\n ", vha->vp_idx, + le16_to_cpu(atio->u.isp2x.status), + le16_to_cpu(atio->u.isp2x.lun), + atio->u.isp2x.execution_codes, + le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, + atio), atio->u.isp2x.rx_id); + if (atio->u.isp2x.status != + __constant_cpu_to_le16(ATIO_CDB_VALID)) { + ql_dbg(ql_dbg_tgt, vha, 0xe05e, + "qla_target(%d): ATIO with error " + "status %x received\n", vha->vp_idx, + le16_to_cpu(atio->u.isp2x.status)); + break; + } + ql_dbg(ql_dbg_tgt, vha, 0xe032, + "FCP CDB: 0x%02x, sizeof(cdb): %lu", + atio->u.isp2x.cdb[0], (unsigned long + int)sizeof(atio->u.isp2x.cdb)); + + rc = qlt_handle_cmd_for_atio(vha, atio); + if (unlikely(rc != 0)) { + if (rc == -ESRCH) { +#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ + qlt_send_busy(vha, atio, 0); +#else + qlt_send_term_exchange(vha, NULL, atio, 1); +#endif + } else { + if (tgt->tgt_stop) { + ql_dbg(ql_dbg_tgt, vha, 0xe05f, + "qla_target: Unable to send " + "command to target, sending TERM " + "EXCHANGE for rsp\n"); + qlt_send_term_exchange(vha, NULL, + atio, 1); + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe060, + "qla_target(%d): Unable to send " + "command to target, sending BUSY " + "status\n", vha->vp_idx); + qlt_send_busy(vha, atio, 0); + } + } + } + } + break; + + case CONTINUE_TGT_IO_TYPE: + { + struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; + ql_dbg(ql_dbg_tgt, vha, 0xe033, + "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); + qlt_do_ctio_completion(vha, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case CTIO_A64_TYPE: + { + struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; + ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", + vha->vp_idx); + qlt_do_ctio_completion(vha, entry->handle, + le16_to_cpu(entry->status)|(pkt->entry_status << 16), + entry); + break; + } + + case IMMED_NOTIFY_TYPE: + ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); + qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); + break; + + case NOTIFY_ACK_TYPE: + if (tgt->notify_ack_expected > 0) { + struct nack_to_isp *entry = (struct nack_to_isp *)pkt; + ql_dbg(ql_dbg_tgt, vha, 0xe036, + "NOTIFY_ACK seq %08x status %x\n", + le16_to_cpu(entry->u.isp2x.seq_id), + le16_to_cpu(entry->u.isp2x.status)); + tgt->notify_ack_expected--; + if (entry->u.isp2x.status != + __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { + ql_dbg(ql_dbg_tgt, vha, 0xe061, + "qla_target(%d): NOTIFY_ACK " + "failed %x\n", vha->vp_idx, + le16_to_cpu(entry->u.isp2x.status)); + } + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe062, + "qla_target(%d): Unexpected NOTIFY_ACK received\n", + vha->vp_idx); + } + break; + + case ABTS_RECV_24XX: + ql_dbg(ql_dbg_tgt, vha, 0xe037, + "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); + qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); + break; + + case ABTS_RESP_24XX: + if (tgt->abts_resp_expected > 0) { + struct abts_resp_from_24xx_fw *entry = + (struct 
abts_resp_from_24xx_fw *)pkt; + ql_dbg(ql_dbg_tgt, vha, 0xe038, + "ABTS_RESP_24XX: compl_status %x\n", + entry->compl_status); + tgt->abts_resp_expected--; + if (le16_to_cpu(entry->compl_status) != + ABTS_RESP_COMPL_SUCCESS) { + if ((entry->error_subcode1 == 0x1E) && + (entry->error_subcode2 == 0)) { + /* + * We've got a race here: the aborted + * exchange was not terminated, i.e. the + * response for the aborted command was + * sent between the time the abort request + * was received and the time it was + * processed. + * Unfortunately, the firmware has a + * silly requirement that all aborted + * exchanges must be explicitly + * terminated, otherwise it refuses to + * send responses for the abort + * requests. So, we have to + * (re)terminate the exchange and retry + * the abort response. + */ + qlt_24xx_retry_term_exchange(vha, + entry); + } else + ql_dbg(ql_dbg_tgt, vha, 0xe063, + "qla_target(%d): ABTS_RESP_24XX " + "failed %x (subcode %x:%x)", + vha->vp_idx, entry->compl_status, + entry->error_subcode1, + entry->error_subcode2); + } + } else { + ql_dbg(ql_dbg_tgt, vha, 0xe064, + "qla_target(%d): Unexpected ABTS_RESP_24XX " + "received\n", vha->vp_idx); + } + break; + + default: + ql_dbg(ql_dbg_tgt, vha, 0xe065, + "qla_target(%d): Received unknown response pkt " + "type %x\n", vha->vp_idx, pkt->entry_type); + break; + } + + tgt->irq_cmd_count--; +} + +/* + * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire + */ +void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, + uint16_t *mailbox) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + int reason_code; + + ql_dbg(ql_dbg_tgt, vha, 0xe039, + "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", + vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done, + ha->operating_mode, ha->current_topology); + + if (!ha->tgt.tgt_ops) + return; + + if (unlikely(tgt == NULL)) { + ql_dbg(ql_dbg_tgt, vha, 0xe03a, + "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); + return; + } + + if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && + IS_QLA2100(ha)) + return; + /* + * In tgt_stop mode we also should allow all requests to pass. + * Otherwise, some commands can get stuck. + */ + + tgt->irq_cmd_count++; + + switch (code) { + case MBA_RESET: /* Reset */ + case MBA_SYSTEM_ERR: /* System Error */ + case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ + case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, + "qla_target(%d): System error async event %#x " + "occurred", vha->vp_idx, code); + break; + case MBA_WAKEUP_THRES: /* Request Queue Wake-up. 
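The handler responds by scheduling an ISP abort (set_bit(ISP_ABORT_NEEDED) below) to recover request-queue resources. 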
*/ + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case MBA_LOOP_UP: + { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, + "qla_target(%d): Async LOOP_UP occurred " + "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, + le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), + le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } + break; + } + + case MBA_LIP_OCCURRED: + case MBA_LOOP_DOWN: + case MBA_LIP_RESET: + case MBA_RSCN_UPDATE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, + "qla_target(%d): Async event %#x occurred " + "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code, + le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), + le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + break; + + case MBA_PORT_UPDATE: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, + "qla_target(%d): Port update async event %#x " + "occurred: updating the ports database (m[1]=%x, m[2]=%x, " + "m[3]=%x, m[4]=%x)", vha->vp_idx, code, + le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), + le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + reason_code = le16_to_cpu(mailbox[2]); + if (reason_code == 0x4) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, + "Async MB 2: Got PLOGI Complete\n"); + else if (reason_code == 0x7) + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, + "Async MB 2: Port Logged Out\n"); + break; + + default: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, + "qla_target(%d): Async event %#x occurred: " + "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, + code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]), + le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4])); + break; + } + + tgt->irq_cmd_count--; +} + +static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, + uint16_t loop_id) +{ + fc_port_t *fcport; + int rc; + + fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + if (!fcport) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, + "qla_target(%d): Allocation of tmp FC port failed", + vha->vp_idx); + return NULL; + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); + + fcport->loop_id = loop_id; + + rc = qla2x00_get_port_database(vha, fcport, 0); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, + "qla_target(%d): Failed to retrieve fcport " + "information -- get_port_database() returned %x " + "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); + kfree(fcport); + return NULL; + } + + return fcport; +} + +/* Must be called under tgt_mutex */ +static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, + uint8_t *s_id) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + fc_port_t *fcport = NULL; + int rc, global_resets; + uint16_t loop_id = 0; + +retry: + global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); + + rc = qla24xx_get_loop_id(vha, s_id, &loop_id); + if (rc != 0) { + if ((s_id[0] == 0xFF) && + (s_id[1] == 0xFC)) { + /* + * This is the Domain Controller, so it should be + * OK to drop SCSI commands from it. + 
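* (S_IDs of the form FF:FC:xx are well-known fabric Domain Controller addresses, so no initiator session is expected for them.) + 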
+ */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, + "Unable to find initiator with S_ID %x:%x:%x", + s_id[0], s_id[1], s_id[2]); + } else + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, + "qla_target(%d): Unable to find " + "initiator with S_ID %x:%x:%x", + vha->vp_idx, s_id[0], s_id[1], + s_id[2]); + return NULL; + } + + fcport = qlt_get_port_database(vha, loop_id); + if (!fcport) + return NULL; + + if (global_resets != + atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, + "qla_target(%d): global reset during session discovery " + "(counter was %d, new %d), retrying", vha->vp_idx, + global_resets, + atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); + goto retry; + } + + sess = qlt_create_sess(vha, fcport, true); + + kfree(fcport); + return sess; +} + +static void qlt_abort_work(struct qla_tgt *tgt, + struct qla_tgt_sess_work_param *prm) +{ + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + unsigned long flags; + uint32_t be_s_id; + uint8_t s_id[3]; + int rc; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (tgt->tgt_stop) + goto out_term; + + s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; + s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; + s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; + + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, + (unsigned char *)&be_s_id); + if (!sess) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + mutex_lock(&ha->tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has got an extra creation ref */ + mutex_unlock(&ha->tgt.tgt_mutex); + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!sess) + goto out_term; + } else { + kref_get(&sess->se_sess->sess_kref); + } + + if (tgt->tgt_stop) + goto out_term; + + rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); + if (rc != 0) + goto out_term; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); + return; + +out_term: + qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); +} + +static void qlt_tmr_work(struct qla_tgt *tgt, + struct qla_tgt_sess_work_param *prm) +{ + struct atio_from_isp *a = &prm->tm_iocb2; + struct scsi_qla_host *vha = tgt->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + unsigned long flags; + uint8_t *s_id = NULL; /* to hide compiler warnings */ + int rc; + uint32_t lun, unpacked_lun; + int lun_size, fn; + void *iocb; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (tgt->tgt_stop) + goto out_term; + + s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); + if (!sess) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + mutex_lock(&ha->tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has got an extra creation ref */ + mutex_unlock(&ha->tgt.tgt_mutex); + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!sess) + goto out_term; + } else { + kref_get(&sess->se_sess->sess_kref); + } + + iocb = a; + lun = a->u.isp24.fcp_cmnd.lun; + lun_size = sizeof(lun); + fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; + unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); + + rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); + if (rc != 0) + goto out_term; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); + return; + +out_term: + qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); + 
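/* No qla_tgt_cmd is associated with a task-management IOCB, hence NULL is passed as the cmd argument of the TERM EXCHANGE helper. */ + 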
spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (sess) + ha->tgt.tgt_ops->put_sess(sess); +} + +static void qlt_sess_work_fn(struct work_struct *work) +{ + struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); + struct scsi_qla_host *vha = tgt->vha; + unsigned long flags; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + while (!list_empty(&tgt->sess_works_list)) { + struct qla_tgt_sess_work_param *prm = list_entry( + tgt->sess_works_list.next, typeof(*prm), + sess_works_list_entry); + + /* + * This work can be scheduled on several CPUs at time, so we + * must delete the entry to eliminate double processing + */ + list_del(&prm->sess_works_list_entry); + + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); + + switch (prm->type) { + case QLA_TGT_SESS_WORK_ABORT: + qlt_abort_work(tgt, prm); + break; + case QLA_TGT_SESS_WORK_TM: + qlt_tmr_work(tgt, prm); + break; + default: + BUG_ON(1); + break; + } + + spin_lock_irqsave(&tgt->sess_work_lock, flags); + + kfree(prm); + } + spin_unlock_irqrestore(&tgt->sess_work_lock, flags); +} + +/* Must be called under tgt_host_action_mutex */ +int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) +{ + struct qla_tgt *tgt; + + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, + "Registering target for host %ld(%p)", base_vha->host_no, ha); + + BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); + + tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); + if (!tgt) { + ql_dbg(ql_dbg_tgt, base_vha, 0xe066, + "Unable to allocate struct qla_tgt\n"); + return -ENOMEM; + } + + if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) + base_vha->host->hostt->supported_mode |= MODE_TARGET; + + tgt->ha = ha; + tgt->vha = base_vha; + init_waitqueue_head(&tgt->waitQ); + INIT_LIST_HEAD(&tgt->sess_list); + INIT_LIST_HEAD(&tgt->del_sess_list); + INIT_DELAYED_WORK(&tgt->sess_del_work, + (void (*)(struct work_struct *))qlt_del_sess_work_fn); + spin_lock_init(&tgt->sess_work_lock); + INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); + INIT_LIST_HEAD(&tgt->sess_works_list); + spin_lock_init(&tgt->srr_lock); + INIT_LIST_HEAD(&tgt->srr_ctio_list); + INIT_LIST_HEAD(&tgt->srr_imm_list); + INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); + atomic_set(&tgt->tgt_global_resets_count, 0); + + ha->tgt.qla_tgt = tgt; + + ql_dbg(ql_dbg_tgt, base_vha, 0xe067, + "qla_target(%d): using 64 Bit PCI addressing", + base_vha->vp_idx); + tgt->tgt_enable_64bit_addr = 1; + /* 3 is reserved */ + tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); + tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; + tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; + + mutex_lock(&qla_tgt_mutex); + list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); + mutex_unlock(&qla_tgt_mutex); + + return 0; +} + +/* Must be called under tgt_host_action_mutex */ +int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) +{ + if (!ha->tgt.qla_tgt) + return 0; + + mutex_lock(&qla_tgt_mutex); + list_del(&ha->tgt.qla_tgt->tgt_list_entry); + mutex_unlock(&qla_tgt_mutex); + + ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", + vha->host_no, ha); + qlt_release(ha->tgt.qla_tgt); + + return 0; +} + +static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, + unsigned char *b) +{ + int i; + + pr_debug("qla2xxx HW vha->node_name: "); + for (i = 0; i < WWN_SIZE; i++) + pr_debug("%02x ", vha->node_name[i]); + 
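/* pr_debug() adds no implicit newline, so the hex dump is terminated explicitly. */ + 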
pr_debug("\n"); + pr_debug("qla2xxx HW vha->port_name: "); + for (i = 0; i < WWN_SIZE; i++) + pr_debug("%02x ", vha->port_name[i]); + pr_debug("\n"); + + pr_debug("qla2xxx passed configfs WWPN: "); + put_unaligned_be64(wwpn, b); + for (i = 0; i < WWN_SIZE; i++) + pr_debug("%02x ", b[i]); + pr_debug("\n"); +} + +/** + * qlt_lport_register - register lport with external module + * + * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops + * @wwpn: Passed FC target WWPN + * @callback: lport initialization callback for tcm_qla2xxx code + * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data + */ +int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, + int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) +{ + struct qla_tgt *tgt; + struct scsi_qla_host *vha; + struct qla_hw_data *ha; + struct Scsi_Host *host; + unsigned long flags; + int rc; + u8 b[WWN_SIZE]; + + mutex_lock(&qla_tgt_mutex); + list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { + vha = tgt->vha; + ha = vha->hw; + + host = vha->host; + if (!host) + continue; + + if (ha->tgt.tgt_ops != NULL) + continue; + + if (!(host->hostt->supported_mode & MODE_TARGET)) + continue; + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (host->active_mode & MODE_TARGET) { + pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", + host->host_no); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + continue; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!scsi_host_get(host)) { + ql_dbg(ql_dbg_tgt, vha, 0xe068, + "Unable to scsi_host_get() for" + " qla2xxx scsi_host\n"); + continue; + } + qlt_lport_dump(vha, wwpn, b); + + if (memcmp(vha->port_name, b, WWN_SIZE)) { + scsi_host_put(host); + continue; + } + /* + * Set up the passed parameters ahead of invoking the callback + */ + ha->tgt.tgt_ops = qla_tgt_ops; + ha->tgt.target_lport_ptr = target_lport_ptr; + rc = (*callback)(vha); + if (rc != 0) { + ha->tgt.tgt_ops = NULL; + ha->tgt.target_lport_ptr = NULL; + } + mutex_unlock(&qla_tgt_mutex); + return rc; + } + mutex_unlock(&qla_tgt_mutex); + + return -ENODEV; +} +EXPORT_SYMBOL(qlt_lport_register); + +/** + * qlt_lport_deregister - Deregister lport + * + * @vha: Registered scsi_qla_host pointer + */ +void qlt_lport_deregister(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct Scsi_Host *sh = vha->host; + /* + * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data + */ + ha->tgt.target_lport_ptr = NULL; + ha->tgt.tgt_ops = NULL; + /* + * Release the Scsi_Host reference for the underlying qla2xxx host + */ + scsi_host_put(sh); +} +EXPORT_SYMBOL(qlt_lport_deregister); + +/* Must be called under HW lock */ +void qlt_set_mode(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + switch (ql2x_ini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + case QLA2XXX_INI_MODE_EXCLUSIVE: + vha->host->active_mode = MODE_TARGET; + break; + case QLA2XXX_INI_MODE_ENABLED: + vha->host->active_mode |= MODE_TARGET; + break; + default: + break; + } + + if (ha->tgt.ini_mode_force_reverse) + qla_reverse_ini_mode(vha); +} + +/* Must be called under HW lock */ +void qlt_clear_mode(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + switch (ql2x_ini_mode) { + case QLA2XXX_INI_MODE_DISABLED: + vha->host->active_mode = MODE_UNKNOWN; + break; + case QLA2XXX_INI_MODE_EXCLUSIVE: + vha->host->active_mode = MODE_INITIATOR; + break; + case QLA2XXX_INI_MODE_ENABLED: + vha->host->active_mode &= ~MODE_TARGET; + break; + default: + break; + } + + if 
(ha->tgt.ini_mode_force_reverse) + qla_reverse_ini_mode(vha); +} + +/* + * qla_tgt_enable_vha - NO LOCK HELD + * + * host_reset, bring up w/ Target Mode Enabled + */ +void +qlt_enable_vha(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + unsigned long flags; + + if (!tgt) { + ql_dbg(ql_dbg_tgt, vha, 0xe069, + "Unable to locate qla_tgt pointer from" + " struct qla_hw_data\n"); + dump_stack(); + return; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + tgt->tgt_stopped = 0; + qlt_set_mode(vha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_hba_online(vha); +} +EXPORT_SYMBOL(qlt_enable_vha); + +/* + * qla_tgt_disable_vha - NO LOCK HELD + * + * Disable Target Mode and reset the adapter + */ +void +qlt_disable_vha(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = ha->tgt.qla_tgt; + unsigned long flags; + + if (!tgt) { + ql_dbg(ql_dbg_tgt, vha, 0xe06a, + "Unable to locate qla_tgt pointer from" + " struct qla_hw_data\n"); + dump_stack(); + return; + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_clear_mode(vha); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_hba_online(vha); +} + +/* + * Called from qla_init.c:qla24xx_vport_create() contex to setup + * the target mode specific struct scsi_qla_host and struct qla_hw_data + * members. + */ +void +qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) +{ + if (!qla_tgt_mode_enabled(vha)) + return; + + mutex_init(&ha->tgt.tgt_mutex); + mutex_init(&ha->tgt.tgt_host_action_mutex); + + qlt_clear_mode(vha); + + /* + * NOTE: Currently the value is kept the same for <24xx and + * >=24xx ISPs. If it is necessary to change it, + * the check should be added for specific ISPs, + * assigning the value appropriately. + */ + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; +} + +void +qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) +{ + /* + * FC-4 Feature bit 0 indicates target functionality to the name server. + */ + if (qla_tgt_mode_enabled(vha)) { + if (qla_ini_mode_enabled(vha)) + ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; + else + ct_req->req.rff_id.fc4_feature = BIT_0; + } else if (qla_ini_mode_enabled(vha)) { + ct_req->req.rff_id.fc4_feature = BIT_1; + } +} + +/* + * qlt_init_atio_q_entries() - Initializes ATIO queue entries. + * @ha: HA context + * + * Beginning of ATIO ring has initialization control block already built + * by nvram config routine. + * + * Returns 0 on success. + */ +void +qlt_init_atio_q_entries(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t cnt; + struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; + + if (!qla_tgt_mode_enabled(vha)) + return; + + for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { + pkt->u.raw.signature = ATIO_PROCESSED; + pkt++; + } + +} + +/* + * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 
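+ * Entries are consumed until an entry still stamped ATIO_PROCESSED (i.e. not yet rewritten by the firmware) is reached; each consumed entry is re-stamped and the new ring index is written back to atio_q_out.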
+ * @ha: SCSI driver HA context + */ +void +qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct atio_from_isp *pkt; + int cnt, i; + + if (!vha->flags.online) + return; + + while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; + cnt = pkt->u.raw.entry_count; + + qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); + + for (i = 0; i < cnt; i++) { + ha->tgt.atio_ring_index++; + if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { + ha->tgt.atio_ring_index = 0; + ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; + } else + ha->tgt.atio_ring_ptr++; + + pkt->u.raw.signature = ATIO_PROCESSED; + pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; + } + wmb(); + } + + /* Adjust ring index */ + WRT_REG_DWORD(®->atio_q_out, ha->tgt.atio_ring_index); +} + +void +qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg) +{ + struct qla_hw_data *ha = vha->hw; + +/* FIXME: atio_q in/out for ha->mqenable=1..? */ + if (ha->mqenable) { +#if 0 + WRT_REG_DWORD(®->isp25mq.atio_q_in, 0); + WRT_REG_DWORD(®->isp25mq.atio_q_out, 0); + RD_REG_DWORD(®->isp25mq.atio_q_out); +#endif + } else { + /* Setup APTIO registers for target mode */ + WRT_REG_DWORD(®->isp24.atio_q_in, 0); + WRT_REG_DWORD(®->isp24.atio_q_out, 0); + RD_REG_DWORD(®->isp24.atio_q_out); + } +} + +void +qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) +{ + struct qla_hw_data *ha = vha->hw; + + if (qla_tgt_mode_enabled(vha)) { + if (!ha->tgt.saved_set) { + /* We save only once */ + ha->tgt.saved_exchange_count = nv->exchange_count; + ha->tgt.saved_firmware_options_1 = + nv->firmware_options_1; + ha->tgt.saved_firmware_options_2 = + nv->firmware_options_2; + ha->tgt.saved_firmware_options_3 = + nv->firmware_options_3; + ha->tgt.saved_set = 1; + } + + nv->exchange_count = __constant_cpu_to_le16(0xFFFF); + + /* Enable target mode */ + nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); + + /* Disable ini mode, if requested */ + if (!qla_ini_mode_enabled(vha)) + nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); + + /* Disable Full Login after LIP */ + nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); + /* Enable initial LIP */ + nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); + /* Enable FC tapes support */ + nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); + /* Disable Full Login after LIP */ + nv->host_p &= __constant_cpu_to_le32(~BIT_10); + /* Enable target PRLI control */ + nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); + } else { + if (ha->tgt.saved_set) { + nv->exchange_count = ha->tgt.saved_exchange_count; + nv->firmware_options_1 = + ha->tgt.saved_firmware_options_1; + nv->firmware_options_2 = + ha->tgt.saved_firmware_options_2; + nv->firmware_options_3 = + ha->tgt.saved_firmware_options_3; + } + return; + } + + /* out-of-order frames reassembly */ + nv->firmware_options_3 |= BIT_6|BIT_9; + + if (ha->tgt.enable_class_2) { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = + FC_COS_CLASS2 | FC_COS_CLASS3; + + nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); + } else { + if (vha->flags.init_done) + fc_host_supported_classes(vha->host) = FC_COS_CLASS3; + + nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); + } +} + +void +qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, + struct init_cb_24xx *icb) +{ + struct qla_hw_data *ha = vha->hw; + + 
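/* BIT_14 in firmware_options_1 presumably tells the firmware to use the node name supplied in the ICB rather than a derived one. */ + 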
if (ha->tgt.node_name_set) { + memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); + icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); + } +} + +int +qlt_24xx_process_response_error(struct scsi_qla_host *vha, + struct sts_entry_24xx *pkt) +{ + switch (pkt->entry_type) { + case ABTS_RECV_24XX: + case ABTS_RESP_24XX: + case CTIO_TYPE7: + case NOTIFY_ACK_TYPE: + return 1; + default: + return 0; + } +} + +void +qlt_modify_vp_config(struct scsi_qla_host *vha, + struct vp_config_entry_24xx *vpmod) +{ + if (qla_tgt_mode_enabled(vha)) + vpmod->options_idx1 &= ~BIT_5; + /* Disable ini mode, if requested */ + if (!qla_ini_mode_enabled(vha)) + vpmod->options_idx1 &= ~BIT_4; +} + +void +qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + mutex_init(&ha->tgt.tgt_mutex); + mutex_init(&ha->tgt.tgt_host_action_mutex); + qlt_clear_mode(base_vha); +} + +int +qlt_mem_alloc(struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * + MAX_MULTI_ID_FABRIC, GFP_KERNEL); + if (!ha->tgt.tgt_vp_map) + return -ENOMEM; + + ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, + (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), + &ha->tgt.atio_dma, GFP_KERNEL); + if (!ha->tgt.atio_ring) { + kfree(ha->tgt.tgt_vp_map); + return -ENOMEM; + } + return 0; +} + +void +qlt_mem_free(struct qla_hw_data *ha) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + if (ha->tgt.atio_ring) { + dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * + sizeof(struct atio_from_isp), ha->tgt.atio_ring, + ha->tgt.atio_dma); + } + kfree(ha->tgt.tgt_vp_map); +} + +/* vport_slock to be held by the caller */ +void +qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + switch (cmd) { + case SET_VP_IDX: + vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; + break; + case SET_AL_PA: + vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; + break; + case RESET_VP_IDX: + vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; + break; + case RESET_AL_PA: + vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; + break; + } +} + +static int __init qlt_parse_ini_mode(void) +{ + if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; + else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) + ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; + else + return false; + + return true; +} + +int __init qlt_init(void) +{ + int ret; + + if (!qlt_parse_ini_mode()) { + ql_log(ql_log_fatal, NULL, 0xe06b, + "qlt_parse_ini_mode() failed\n"); + return -EINVAL; + } + + if (!QLA_TGT_MODE_ENABLED()) + return 0; + + qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", + sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, + NULL); + if (!qla_tgt_cmd_cachep) { + ql_log(ql_log_fatal, NULL, 0xe06c, + "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); + return -ENOMEM; + } + + qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", + sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct + qla_tgt_mgmt_cmd), 0, NULL); + if (!qla_tgt_mgmt_cmd_cachep) { + ql_log(ql_log_fatal, NULL, 0xe06d, + "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); + ret = -ENOMEM; + goto out; + } + + qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, + 
mempool_free_slab, qla_tgt_mgmt_cmd_cachep); + if (!qla_tgt_mgmt_cmd_mempool) { + ql_log(ql_log_fatal, NULL, 0xe06e, + "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); + ret = -ENOMEM; + goto out_mgmt_cmd_cachep; + } + + qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); + if (!qla_tgt_wq) { + ql_log(ql_log_fatal, NULL, 0xe06f, + "alloc_workqueue for qla_tgt_wq failed\n"); + ret = -ENOMEM; + goto out_cmd_mempool; + } + /* + * Return 1 to signal that initiator-mode is being disabled + */ + return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; + +out_cmd_mempool: + mempool_destroy(qla_tgt_mgmt_cmd_mempool); +out_mgmt_cmd_cachep: + kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); +out: + kmem_cache_destroy(qla_tgt_cmd_cachep); + return ret; +} + +void qlt_exit(void) +{ + if (!QLA_TGT_MODE_ENABLED()) + return; + + destroy_workqueue(qla_tgt_wq); + mempool_destroy(qla_tgt_mgmt_cmd_mempool); + kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); + kmem_cache_destroy(qla_tgt_cmd_cachep); +} diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h new file mode 100644 index 000000000000..9ec19bc2f0fe --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -0,0 +1,1005 @@ +/* + * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> + * Copyright (C) 2004 - 2005 Leonid Stoljar + * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> + * Copyright (C) 2007 - 2010 ID7 Ltd. + * + * Forward port and refactoring to modern qla2xxx and target/configfs + * + * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> + * + * Additional file for the target driver support. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +/* + * This is the global def file that is useful for including from the + * target portion. + */ + +#ifndef __QLA_TARGET_H +#define __QLA_TARGET_H + +#include "qla_def.h" + +/* + * Must be changed on any change in any initiator visible interfaces or + * data in the target add-on + */ +#define QLA2XXX_TARGET_MAGIC 269 + +/* + * Must be changed on any change in any target visible interfaces or + * data in the initiator + */ +#define QLA2XXX_INITIATOR_MAGIC 57222 + +#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive" +#define QLA2XXX_INI_MODE_STR_DISABLED "disabled" +#define QLA2XXX_INI_MODE_STR_ENABLED "enabled" + +#define QLA2XXX_INI_MODE_EXCLUSIVE 0 +#define QLA2XXX_INI_MODE_DISABLED 1 +#define QLA2XXX_INI_MODE_ENABLED 2 + +#define QLA2XXX_COMMAND_COUNT_INIT 250 +#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250 + +/* + * Used to mark which completion handles (for RIO Status's) are for CTIO's + * vs. regular (non-target) info. 
This is checked for in + * qla2x00_process_response_queue() to see if a handle coming back in a + * multi-complete should come to the tgt driver or be handled there by qla2xxx + */ +#define CTIO_COMPLETION_HANDLE_MARK BIT_29 +#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS) +#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS" +#endif +#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK) + +/* Used to mark CTIO as intermediate */ +#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30 + +#ifndef OF_SS_MODE_0 +/* + * ISP target entries - Flags bit definitions. + */ +#define OF_SS_MODE_0 0 +#define OF_SS_MODE_1 1 +#define OF_SS_MODE_2 2 +#define OF_SS_MODE_3 3 + +#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */ +#define OF_DATA_IN BIT_6 /* Data in to initiator */ + /* (data from target to initiator) */ +#define OF_DATA_OUT BIT_7 /* Data out from initiator */ + /* (data from initiator to target) */ +#define OF_NO_DATA (BIT_7 | BIT_6) +#define OF_INC_RC BIT_8 /* Increment command resource count */ +#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */ +#define OF_CONF_REQ BIT_13 /* Confirmation Requested */ +#define OF_TERM_EXCH BIT_14 /* Terminate exchange */ +#define OF_SSTS BIT_15 /* Send SCSI status */ +#endif + +#ifndef QLA_TGT_DATASEGS_PER_CMD32 +#define QLA_TGT_DATASEGS_PER_CMD32 3 +#define QLA_TGT_DATASEGS_PER_CONT32 7 +#define QLA_TGT_MAX_SG32(ql) \ + (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \ + QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0) + +#define QLA_TGT_DATASEGS_PER_CMD64 2 +#define QLA_TGT_DATASEGS_PER_CONT64 5 +#define QLA_TGT_MAX_SG64(ql) \ + (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \ + QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0) +#endif + +#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX +#define QLA_TGT_DATASEGS_PER_CMD_24XX 1 +#define QLA_TGT_DATASEGS_PER_CONT_24XX 5 +#define QLA_TGT_MAX_SG_24XX(ql) \ + (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ + QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) +#endif +#endif + +#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ + ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ + : (uint16_t)(iocb)->u.isp2x.target.id.standard) + +#ifndef IMMED_NOTIFY_TYPE +#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */ +/* + * ISP queue - immediate notify entry structure definition. + * This is sent by the ISP to the Target driver. + * This IOCB would have report of events sent by the + * initiator, that needs to be handled by the target + * driver immediately. + */ +struct imm_ntfy_from_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + uint32_t sys_define_2; /* System defined. 
*/ + target_id_t target; + uint16_t lun; + uint8_t target_id; + uint8_t reserved_1; + uint16_t status_modifier; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; +#define SRR_IU_DATA_IN 0x1 +#define SRR_IU_DATA_OUT 0x5 +#define SRR_IU_STATUS 0x7 + uint16_t srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; + uint16_t nport_handle; + uint16_t reserved_2; + uint16_t flags; +#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 +#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 + uint16_t srr_rx_id; + uint16_t status; + uint8_t status_subcode; + uint8_t reserved_3; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_ox_id; + uint8_t reserved_4[19]; + uint8_t vp_index; + uint32_t reserved_5; + uint8_t port_id[3]; + uint8_t reserved_6; + } isp24; + } u; + uint16_t reserved_7; + uint16_t ox_id; +} __packed; +#endif + +#ifndef NOTIFY_ACK_TYPE +#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */ +/* + * ISP queue - notify acknowledge entry structure definition. + * This is sent to the ISP from the target driver. + */ +struct nack_to_isp { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + union { + struct { + uint32_t sys_define_2; /* System defined. */ + target_id_t target; + uint8_t target_id; + uint8_t reserved_1; + uint16_t flags; + uint16_t resp_code; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_flags; + uint16_t srr_reject_code; + uint8_t srr_reject_vendor_uniq; + uint8_t srr_reject_code_expl; + uint8_t reserved_2[24]; + } isp2x; + struct { + uint32_t handle; + uint16_t nport_handle; + uint16_t reserved_1; + uint16_t flags; + uint16_t srr_rx_id; + uint16_t status; + uint8_t status_subcode; + uint8_t reserved_3; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_flags; + uint8_t reserved_4[19]; + uint8_t vp_index; + uint8_t srr_reject_vendor_uniq; + uint8_t srr_reject_code_expl; + uint8_t srr_reject_code; + uint8_t reserved_5[5]; + } isp24; + } u; + uint8_t reserved[2]; + uint16_t ox_id; +} __packed; +#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 +#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 + +#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9 + +#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0 +#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a + +#define NOTIFY_ACK_SUCCESS 0x01 +#endif + +#ifndef ACCEPT_TGT_IO_TYPE +#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */ +#endif + +#ifndef CONTINUE_TGT_IO_TYPE +#define CONTINUE_TGT_IO_TYPE 0x17 +/* + * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure. + * This structure is sent to the ISP 2xxx from target driver. + */ +struct ctio_to_2xxx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + target_id_t target; + uint16_t rx_id; + uint16_t flags; + uint16_t status; + uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + uint16_t dseg_count; /* Data segment count. */ + uint32_t relative_offset; + uint32_t residual; + uint16_t reserved_1[3]; + uint16_t scsi_status; + uint32_t transfer_length; + uint32_t dseg_0_address; /* Data segment 0 address. 
 */
+	uint32_t dseg_0_length;	/* Data segment 0 length. */
+	uint32_t dseg_1_address;	/* Data segment 1 address. */
+	uint32_t dseg_1_length;	/* Data segment 1 length. */
+	uint32_t dseg_2_address;	/* Data segment 2 address. */
+	uint32_t dseg_2_length;	/* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID 0x07
+#define ATIO_CANT_PROV_CAP 0x16
+#define ATIO_CDB_VALID 0x3D
+
+#define ATIO_EXEC_READ BIT_1
+#define ATIO_EXEC_WRITE BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS 0x01
+#define CTIO_ABORTED 0x02
+#define CTIO_INVALID_RX_ID 0x08
+#define CTIO_TIMEOUT 0x0B
+#define CTIO_LIP_RESET 0x0E
+#define CTIO_TARGET_RESET 0x17
+#define CTIO_PORT_UNAVAILABLE 0x28
+#define CTIO_PORT_LOGGED_OUT 0x29
+#define CTIO_PORT_CONF_CHANGED 0x2A
+#define CTIO_SRR_RECEIVED 0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+	uint8_t r_ctl;
+	uint8_t d_id[3];
+	uint8_t cs_ctl;
+	uint8_t s_id[3];
+	uint8_t type;
+	uint8_t f_ctl[3];
+	uint8_t seq_id;
+	uint8_t df_ctl;
+	uint16_t seq_cnt;
+	uint16_t ox_id;
+	uint16_t rx_id;
+	uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+	uint8_t d_id[3];
+	uint8_t r_ctl;
+	uint8_t s_id[3];
+	uint8_t cs_ctl;
+	uint8_t f_ctl[3];
+	uint8_t type;
+	uint16_t seq_cnt;
+	uint8_t df_ctl;
+	uint8_t seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
+#define F_CTL_LAST_SEQ BIT_20
+#define F_CTL_END_SEQ BIT_19
+#define F_CTL_SEQ_INITIATIVE BIT_16
+
+#define R_CTL_BASIC_LINK_SERV 0x80
+#define R_CTL_B_ACC 0x4
+#define R_CTL_B_RJT 0x5
+
+struct atio7_fcp_cmnd {
+	uint64_t lun;
+	uint8_t cmnd_ref;
+	uint8_t task_attr:3;
+	uint8_t reserved:5;
+	uint8_t task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
+#define FCP_CMND_TASK_MGMT_LU_RESET 4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
+	uint8_t wrdata:1;
+	uint8_t rddata:1;
+	uint8_t add_cdb_len:6;
+	uint8_t cdb[16];
+	/*
+	 * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
+	 * Size 4 only to make sizeof(struct atio7_fcp_cmnd) be as expected by
+	 * BUILD_BUG_ON in qlt_init().
+	 */
+	uint8_t add_cdb[4];
+	/* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ * This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+	union {
+		struct {
+			uint16_t entry_hdr;
+			uint8_t sys_define; /* System defined. */
+			uint8_t entry_status; /* Entry Status. */
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint16_t rx_id;
+			uint16_t flags;
+			uint16_t status;
+			uint8_t command_ref;
+			uint8_t task_codes;
+			uint8_t task_flags;
+			uint8_t execution_codes;
+			uint8_t cdb[MAX_CMDSZ];
+			uint32_t data_length;
+			uint16_t lun;
+			uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
+			uint16_t reserved_32[6];
+			uint16_t ox_id;
+		} isp2x;
+		struct {
+			uint16_t entry_hdr;
+			uint8_t fcp_cmnd_len_low;
+			uint8_t fcp_cmnd_len_high:4;
+			uint8_t attr:4;
+			uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
+			struct fcp_hdr fcp_hdr;
+			struct atio7_fcp_cmnd fcp_cmnd;
+		} isp24;
+		struct {
+			uint8_t entry_type; /* Entry type. */
+			uint8_t entry_count; /* Entry count. */
+			uint8_t data[58];
+			uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+		} raw;
+	} u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ * This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+	uint8_t entry_type; /* Entry type. */
+	uint8_t entry_count; /* Entry count. */
+	uint8_t sys_define; /* System defined. */
+	uint8_t entry_status; /* Entry Status. */
+	uint32_t handle; /* System defined handle */
+	uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
+	uint16_t timeout;
+	uint16_t dseg_count; /* Data segment count. */
+	uint8_t vp_index;
+	uint8_t add_flags;
+	uint8_t initiator_id[3];
+	uint8_t reserved;
+	uint32_t exchange_addr;
+	union {
+		struct {
+			uint16_t reserved1;
+			uint16_t flags;
+			uint32_t residual;
+			uint16_t ox_id;
+			uint16_t scsi_status;
+			uint32_t relative_offset;
+			uint32_t reserved2;
+			uint32_t transfer_length;
+			uint32_t reserved3;
+			/* Data segment 0 address. */
+			uint32_t dseg_0_address[2];
+			/* Data segment 0 length. */
+			uint32_t dseg_0_length;
+		} status0;
+		struct {
+			uint16_t sense_length;
+			uint16_t flags;
+			uint32_t residual;
+			uint16_t ox_id;
+			uint16_t scsi_status;
+			uint16_t response_len;
+			uint16_t reserved;
+			uint8_t sense_data[24];
+		} status1;
+	} u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+	uint8_t entry_type; /* Entry type. */
+	uint8_t entry_count; /* Entry count. */
+	uint8_t sys_define; /* System defined. */
+	uint8_t entry_status; /* Entry Status. */
+	uint32_t handle; /* System defined handle */
+	uint16_t status;
+	uint16_t timeout;
+	uint16_t dseg_count; /* Data segment count. */
+	uint8_t vp_index;
+	uint8_t reserved1[5];
+	uint32_t exchange_address;
+	uint16_t reserved2;
+	uint16_t flags;
+	uint32_t residual;
+	uint16_t ox_id;
+	uint16_t reserved3;
+	uint32_t relative_offset;
+	uint8_t reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS BIT_15
+#define CTIO7_FLAGS_TERMINATE BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0 0
+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
+#define CTIO7_FLAGS_DSD_PTR BIT_2
+#define CTIO7_FLAGS_DATA_IN BIT_1
+#define CTIO7_FLAGS_DATA_OUT BIT_0
+
+#define ELS_PLOGI 0x3
+#define ELS_FLOGI 0x4
+#define ELS_LOGO 0x5
+#define ELS_PRLI 0x20
+#define ELS_PRLO 0x21
+#define ELS_TPRLO 0x24
+#define ELS_PDISC 0x50
+#define ELS_ADISC 0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ * The ABTS BLS received from the wire is sent to the
+ * target driver by the ISP 24xx.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+	uint8_t entry_type; /* Entry type. */
+	uint8_t entry_count; /* Entry count. */
+	uint8_t sys_define; /* System defined. */
+	uint8_t entry_status; /* Entry Status.
*/ + uint8_t reserved_1[6]; + uint16_t nport_handle; + uint8_t reserved_2[2]; + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; + uint32_t exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[16]; + uint32_t exchange_addr_to_abort; +} __packed; + +#define ABTS_PARAM_ABORT_SEQ BIT_0 + +struct ba_acc_le { + uint16_t reserved; + uint8_t seq_id_last; + uint8_t seq_id_valid; +#define SEQ_ID_VALID 0x80 +#define SEQ_ID_INVALID 0x00 + uint16_t rx_id; + uint16_t ox_id; + uint16_t high_seq_cnt; + uint16_t low_seq_cnt; +} __packed; + +struct ba_rjt_le { + uint8_t vendor_uniq; + uint8_t reason_expl; + uint8_t reason_code; +#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1 +#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9 + uint8_t reserved; +} __packed; + +/* + * ISP queue - ABTS Response IOCB entry structure definition for 24xx. + * The ABTS response to the ABTS received is sent by the + * target driver to the ISP 24xx. + * The IOCB is placed on the request queue. + */ +struct abts_resp_to_24xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; + uint16_t reserved_1; + uint16_t nport_handle; + uint16_t control_flags; +#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0 + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; + uint32_t exchange_address; + struct fcp_hdr_le fcp_hdr_le; + union { + struct ba_acc_le ba_acct; + struct ba_rjt_le ba_rjt; + } __packed payload; + uint32_t reserved_4; + uint32_t exchange_addr_to_abort; +} __packed; + +/* + * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure. + * The ABTS response with completion status to the ABTS response + * (sent by the target driver to the ISP 24xx) is sent by the + * ISP24xx firmware to the target driver. + * The IOCB is placed on the response queue. + */ +struct abts_resp_from_24xx_fw { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; + uint16_t compl_status; +#define ABTS_RESP_COMPL_SUCCESS 0 +#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31 + uint16_t nport_handle; + uint16_t reserved_1; + uint8_t reserved_2; + uint8_t reserved_3:4; + uint8_t sof_type:4; + uint32_t exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[8]; + uint32_t error_subcode1; +#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E + uint32_t error_subcode2; + uint32_t exchange_addr_to_abort; +} __packed; + +/********************************************************************\ + * Type Definitions used by initiator & target halves +\********************************************************************/ + +struct qla_tgt_mgmt_cmd; +struct qla_tgt_sess; + +/* + * This structure provides a template of function calls that the + * target driver (from within qla_target.c) can issue to the + * target module (tcm_qla2xxx). 
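+ * The table is populated by tcm_qla2xxx when it registers an lport via
+ * qlt_lport_register().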
+ */
+struct qla_tgt_func_tmpl {
+
+	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+			unsigned char *, uint32_t, int, int, int);
+	int (*handle_data)(struct qla_tgt_cmd *);
+	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+			uint32_t);
+	void (*free_cmd)(struct qla_tgt_cmd *);
+	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+	void (*free_session)(struct qla_tgt_sess *);
+
+	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+					void *, uint8_t *, uint16_t);
+	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+						const uint16_t);
+	struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+						const uint8_t *);
+	void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+	void (*put_sess)(struct qla_tgt_sess *);
+	void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT 10 /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET 0x000E
+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
+#define IMM_NTFY_ABORT_TASK 0x0020
+#define IMM_NTFY_PORT_LOGOUT 0x0029
+#define IMM_NTFY_PORT_CONFIG 0x002A
+#define IMM_NTFY_GLBL_TPRLO 0x002D
+#define IMM_NTFY_GLBL_LOGO 0x002E
+#define IMM_NTFY_RESOURCE 0x0034
+#define IMM_NTFY_MSG_RX 0x0036
+#define IMM_NTFY_SRR 0x0045
+#define IMM_NTFY_ELS 0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT 8
+
+#define QLA_TGT_CLEAR_ACA 0x40
+#define QLA_TGT_TARGET_RESET 0x20
+#define QLA_TGT_LUN_RESET 0x10
+#define QLA_TGT_CLEAR_TS 0x04
+#define QLA_TGT_ABORT_TS 0x02
+#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
+#define QLA_TGT_ABORT_ALL 0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
+#define QLA_TGT_NEXUS_LOSS 0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE 0
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE 0
+#define ATIO_HEAD_OF_QUEUE 1
+#define ATIO_ORDERED_QUEUE 2
+#define ATIO_ACA_QUEUE 4
+#define ATIO_UNTAGGED 5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define FC_TM_SUCCESS 0
+#define FC_TM_BAD_FCP_DATA 1
+#define FC_TM_BAD_CMD 2
+#define FC_TM_FCP_DATA_MISMATCH 3
+#define FC_TM_REJECT 4
+#define FC_TM_FAILED 5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+ * terminated, so no more actions are needed and success should be returned
+ * to target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
+	(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+	uint8_t port_name[WWN_SIZE];
+	uint16_t loop_id;
+	uint16_t reserved;
+};
+
+struct qla_tgt {
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+
+	/*
+	 * To sync between IRQ handlers and qlt_target_release(). Needed,
+	 * because req_pkt() can drop/reacquire HW lock inside. Protected by
+	 * HW lock.
+	 */
+	int irq_cmd_count;
+
+	int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+	/* Target's flags, serialized by pha->hardware_lock */
+	unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+	unsigned int link_reinit_iocb_pending:1;
+
+	/*
+	 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+	 * OR hardware_lock for reading.
+	 */
+	int tgt_stop; /* the target mode driver is being stopped */
+	int tgt_stopped; /* the target mode driver has been stopped */
+
+	/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+	int sess_count;
+
+	/* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+	struct list_head sess_list;
+
+	/* Protected by hardware_lock */
+	struct list_head del_sess_list;
+	struct delayed_work sess_del_work;
+
+	spinlock_t sess_work_lock;
+	struct list_head sess_works_list;
+	struct work_struct sess_work;
+
+	struct imm_ntfy_from_isp link_reinit_iocb;
+	wait_queue_head_t waitQ;
+	int notify_ack_expected;
+	int abts_resp_expected;
+	int modify_lun_expected;
+
+	int ctio_srr_id;
+	int imm_srr_id;
+	spinlock_t srr_lock;
+	struct list_head srr_ctio_list;
+	struct list_head srr_imm_list;
+	struct work_struct srr_work;
+
+	atomic_t tgt_global_resets_count;
+
+	struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+	uint16_t loop_id;
+	port_id_t s_id;
+
+	unsigned int conf_compl_supported:1;
+	unsigned int deleted:1;
+	unsigned int local:1;
+	unsigned int tearing_down:1;
+
+	struct se_session *se_sess;
+	struct scsi_qla_host *vha;
+	struct qla_tgt *tgt;
+
+	struct list_head sess_list_entry;
+	unsigned long expires;
+	struct list_head del_list_entry;
+
+	uint8_t port_name[WWN_SIZE];
+	struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+	struct qla_tgt_sess *sess;
+	int state;
+	struct se_cmd se_cmd;
+	struct work_struct free_work;
+	struct work_struct work;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+	/* to save extra sess dereferences */
+	unsigned int conf_compl_supported:1;
+	unsigned int sg_mapped:1;
+	unsigned int free_sg:1;
+	unsigned int aborted:1; /* Needed in case of SRR */
+	unsigned int write_data_transferred:1;
+
+	struct scatterlist *sg; /* cmd data buffer SG vector */
+	int sg_cnt; /* SG segments count */
+	int bufflen; /* cmd buffer length */
+	int offset;
+	uint32_t tag;
+	uint32_t unpacked_lun;
+	enum dma_data_direction dma_data_direction;
+
+	uint16_t loop_id; /* to save extra sess dereferences */
+	struct qla_tgt *tgt; /* to save extra sess dereferences */
+	struct scsi_qla_host *vha;
+	struct list_head cmd_list;
+
+	struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+	struct list_head
sess_works_list_entry; + +#define QLA_TGT_SESS_WORK_ABORT 1 +#define QLA_TGT_SESS_WORK_TM 2 + int type; + + union { + struct abts_recv_from_24xx abts; + struct imm_ntfy_from_isp tm_iocb; + struct atio_from_isp tm_iocb2; + }; +}; + +struct qla_tgt_mgmt_cmd { + uint8_t tmr_func; + uint8_t fc_tm_rsp; + struct qla_tgt_sess *sess; + struct se_cmd se_cmd; + struct work_struct free_work; + unsigned int flags; +#define QLA24XX_MGMT_SEND_NACK 1 + union { + struct atio_from_isp atio; + struct imm_ntfy_from_isp imm_ntfy; + struct abts_recv_from_24xx abts; + } __packed orig_iocb; +}; + +struct qla_tgt_prm { + struct qla_tgt_cmd *cmd; + struct qla_tgt *tgt; + void *pkt; + struct scatterlist *sg; /* cmd data buffer SG vector */ + int seg_cnt; + int req_cnt; + uint16_t rq_result; + uint16_t scsi_status; + unsigned char *sense_buffer; + int sense_buffer_len; + int residual; + int add_status_pkt; +}; + +struct qla_tgt_srr_imm { + struct list_head srr_list_entry; + int srr_id; + struct imm_ntfy_from_isp imm_ntfy; +}; + +struct qla_tgt_srr_ctio { + struct list_head srr_list_entry; + int srr_id; + struct qla_tgt_cmd *cmd; +}; + +#define QLA_TGT_XMIT_DATA 1 +#define QLA_TGT_XMIT_STATUS 2 +#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) + +#include <linux/version.h> + +extern struct qla_tgt_data qla_target; +/* + * Internal function prototypes + */ +void qlt_disable_vha(struct scsi_qla_host *); + +/* + * Function prototypes for qla_target.c logic used by qla2xxx LLD code. + */ +extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *); +extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); +extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64, + int (*callback)(struct scsi_qla_host *), void *); +extern void qlt_lport_deregister(struct scsi_qla_host *); +extern void qlt_unreg_sess(struct qla_tgt_sess *); +extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); +extern void qlt_set_mode(struct scsi_qla_host *ha); +extern void qlt_clear_mode(struct scsi_qla_host *ha); +extern int __init qlt_init(void); +extern void qlt_exit(void); +extern void qlt_update_vp_map(struct scsi_qla_host *, int); + +/* + * This macro is used during early initializations when host->active_mode + * is not set. Right now, ha value is ignored. + */ +#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED) + +static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha) +{ + return ha->host->active_mode & MODE_TARGET; +} + +static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha) +{ + return ha->host->active_mode & MODE_INITIATOR; +} + +static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) +{ + if (ha->host->active_mode & MODE_INITIATOR) + ha->host->active_mode &= ~MODE_INITIATOR; + else + ha->host->active_mode |= MODE_INITIATOR; +} + +/* + * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 
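+ * In particular, qlt_24xx_process_atio_queue() and qlt_ctio_completion()
+ * are driven from the qla2xxx interrupt handling paths.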
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+	struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+	device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+	struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+	struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+	struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+	struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..436598f57404
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1955 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 nibble;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		if (isdigit(c))
+			nibble = c - '0';
+		else if (isxdigit(c) && (islower(c) || !strict))
+			nibble = tolower(c) - 'a' + 10;
+		else
+			goto fail;
+		*wwn = (*wwn << 4) | nibble;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+	    err, cp - name, pos, byte);
+	return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+	    "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+	    b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+	return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+	unsigned int i, j;
+	int value;
+	u8 wwn[8];
+
+	memset(wwn, 0, sizeof(wwn));
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		value = hex_to_bin(*ns++);
+		if (value >= 0)
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+
+	*nm = wwn_to_u64(wwn);
+	return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+	const char *name,
+	size_t count,
+	u64 *wwpn,
+	u64 *wwnn)
+{
+	unsigned int cnt = count;
+	int rc;
+
+	*wwpn = 0;
+	*wwnn = 0;
+
+	/* count may include a LF at end of string */
+	if (name[cnt-1] == '\n')
+		cnt--;
+
+	/* validate we have enough characters for WWPN */
+	if ((cnt != (16+1+16)) || (name[16] != ':'))
+		return -EINVAL;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+	if (rc != 0)
return rc; + + rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); + if (rc != 0) + return rc; + + return 0; +} + +static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, + u64 wwpn, u64 wwnn) +{ + u8 b[8], b2[8]; + + put_unaligned_be64(wwpn, b); + put_unaligned_be64(wwnn, b2); + return snprintf(buf, len, + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], + b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); +} + +static char *tcm_qla2xxx_npiv_get_fabric_name(void) +{ + return "qla2xxx_npiv"; +} + +static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + u8 proto_id; + + switch (lport->lport_proto_id) { + case SCSI_PROTOCOL_FCP: + default: + proto_id = fc_get_fabric_proto_ident(se_tpg); + break; + } + + return proto_id; +} + +static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + + return &lport->lport_name[0]; +} + +static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + + return &lport->lport_npiv_name[0]; +} + +static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + return tpg->lport_tpgt; +} + +static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 tcm_qla2xxx_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + int ret = 0; + + switch (lport->lport_proto_id) { + case SCSI_PROTOCOL_FCP: + default: + ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + break; + } + + return ret; +} + +static u32 tcm_qla2xxx_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + int ret = 0; + + switch (lport->lport_proto_id) { + case SCSI_PROTOCOL_FCP: + default: + ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + break; + } + + return ret; +} + +static char *tcm_qla2xxx_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + char *tid = NULL; + + switch (lport->lport_proto_id) { + case SCSI_PROTOCOL_FCP: + default: + tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + break; + } + + return tid; +} + +static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return 
QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+	if (!nacl) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+			struct tcm_qla2xxx_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+			struct qla_tgt_mgmt_cmd, free_work);
+
+	transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.
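+ * The actual free is deferred to the tcm_qla2xxx_free_wq workqueue so it
+ * runs in process context.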
qla_hw_data->hardware_lock is expected to be held + */ +static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) +{ + INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} + +/* + * Called from struct target_core_fabric_ops->check_stop_free() context + */ +static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd->se_sess, se_cmd); +} + +/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying + * fabric descriptor @se_cmd command to release + */ +static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd; + + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { + struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, + struct qla_tgt_mgmt_cmd, se_cmd); + qlt_free_mcmd(mcmd); + return; + } + + cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + qlt_free_cmd(cmd); +} + +static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) +{ + struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; + struct scsi_qla_host *vha; + unsigned long flags; + + BUG_ON(!sess); + vha = sess->vha; + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + sess->tearing_down = 1; + target_splice_sess_cmd_list(se_sess); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + + return 1; +} + +static void tcm_qla2xxx_close_session(struct se_session *se_sess) +{ + struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; + struct scsi_qla_host *vha; + unsigned long flags; + + BUG_ON(!sess); + vha = sess->vha; + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + qlt_unreg_sess(sess); + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + +static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +/* + * The LIO target core uses DMA_TO_DEVICE to mean that data is going + * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean + * that data is coming from the target (eg handling a READ). However, + * this is just the opposite of what we have to tell the DMA mapping + * layer -- eg when handling a READ, the HBA will have to DMA the data + * out of memory so it can send it to the initiator, which means we + * need to use DMA_TO_DEVICE when we map the data. + */ +static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd) +{ + if (se_cmd->se_cmd_flags & SCF_BIDI) + return DMA_BIDIRECTIONAL; + + switch (se_cmd->data_direction) { + case DMA_TO_DEVICE: + return DMA_FROM_DEVICE; + case DMA_FROM_DEVICE: + return DMA_TO_DEVICE; + case DMA_NONE: + default: + return DMA_NONE; + } +} + +static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + + cmd->bufflen = se_cmd->data_length; + cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + + cmd->sg_cnt = se_cmd->t_data_nents; + cmd->sg = se_cmd->t_data_sg; + + /* + * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup + * the SGL mappings into PCIe memory for incoming FCP WRITE data. + */ + return qlt_rdy_to_xfer(cmd); +} + +static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) +{ + unsigned long flags; + /* + * Check for WRITE_PENDING status to determine if we need to wait for + * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data(). 
+ */ + spin_lock_irqsave(&se_cmd->t_state_lock, flags); + if (se_cmd->t_state == TRANSPORT_WRITE_PENDING || + se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, + 3000); + return 0; + } + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + + return 0; +} + +static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + + return cmd->tag; +} + +static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +/* + * Called from process context in qla_target.c:qlt_do_work() code + */ +static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, + unsigned char *cdb, uint32_t data_length, int fcp_task_attr, + int data_dir, int bidi) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct se_session *se_sess; + struct qla_tgt_sess *sess; + int flags = TARGET_SCF_ACK_KREF; + + if (bidi) + flags |= TARGET_SCF_BIDI_OP; + + sess = cmd->sess; + if (!sess) { + pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); + return -EINVAL; + } + + se_sess = sess->se_sess; + if (!se_sess) { + pr_err("Unable to locate active struct se_session\n"); + return -EINVAL; + } + + target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], + cmd->unpacked_lun, data_length, fcp_task_attr, + data_dir, flags); + return 0; +} + +static void tcm_qla2xxx_do_rsp(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + /* + * Dispatch ->queue_status from workqueue process context + */ + transport_generic_request_failure(&cmd->se_cmd); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; + /* + * Ensure that the complete FCP WRITE payload has been received. + * Otherwise return an exception via CHECK_CONDITION status. + */ + if (!cmd->write_data_transferred) { + /* + * Check if se_cmd has already been aborted via LUN_RESET, and + * waiting upon completion in tcm_qla2xxx_write_pending_status() + */ + spin_lock_irqsave(&se_cmd->t_state_lock, flags); + if (se_cmd->transport_state & CMD_T_ABORTED) { + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + complete(&se_cmd->t_transport_stop_comp); + return 0; + } + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + + se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; + INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); + return 0; + } + /* + * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE + * status to the backstore processing thread. 
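+ * transport_generic_handle_data() below hands the received payload to the
+ * backend device.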
+ */
+	return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+	uint8_t tmr_func, uint32_t tag)
+{
+	struct qla_tgt_sess *sess = mcmd->sess;
+	struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+	    tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+			struct qla_tgt_cmd, se_cmd);
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	cmd->sg_cnt = se_cmd->t_data_nents;
+	cmd->sg = se_cmd->t_data_sg;
+	cmd->offset = 0;
+
+	/*
+	 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
+	 */
+	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+	    se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+			struct qla_tgt_cmd, se_cmd);
+	int xmit_type = QLA_TGT_XMIT_STATUS;
+
+	cmd->bufflen = se_cmd->data_length;
+	cmd->sg = NULL;
+	cmd->sg_cnt = 0;
+	cmd->offset = 0;
+	cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		/*
+		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+		 * for qla_tgt_xmit_response LLD code
+		 */
+		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+		se_cmd->residual_count = se_cmd->data_length;
+
+		cmd->bufflen = 0;
+	}
+	/*
+	 * Now queue status response to qla2xxx LLD code and response ring
+	 */
+	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+			struct qla_tgt_mgmt_cmd, se_cmd);
+
+	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+	    mcmd, se_tmr->function, se_tmr->response);
+	/*
+	 * Do translation between TCM TM response codes and
+	 * QLA2xxx FC TM response codes.
+	 */
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		mcmd->fc_tm_rsp = FC_TM_REJECT;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+	default:
+		mcmd->fc_tm_rsp = FC_TM_FAILED;
+		break;
+	}
+	/*
+	 * Queue the TM response to QLA2xxx LLD to build a
+	 * CTIO response packet.
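+ * The mgmt command is freed later through the free_mcmd() callback once
+ * the LLD is done with it.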
+ */ + qlt_xmit_tm_rsp(mcmd); + + return 0; +} + +static u16 tcm_qla2xxx_get_fabric_sense_len(void) +{ + return 0; +} + +static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd, + u32 sense_length) +{ + return 0; +} + +/* Local pointer to allocated TCM configfs fabric module */ +struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; +struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; + +static int tcm_qla2xxx_setup_nacl_from_rport( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct tcm_qla2xxx_lport *lport, + struct tcm_qla2xxx_nacl *nacl, + u64 rport_wwnn) +{ + struct scsi_qla_host *vha = lport->qla_vha; + struct Scsi_Host *sh = vha->host; + struct fc_host_attrs *fc_host = shost_to_fc_host(sh); + struct fc_rport *rport; + unsigned long flags; + void *node; + int rc; + + /* + * Scan the existing rports, and create a session for the + * explict NodeACL is an matching rport->node_name already + * exists. + */ + spin_lock_irqsave(sh->host_lock, flags); + list_for_each_entry(rport, &fc_host->rports, peers) { + if (rport_wwnn != rport->node_name) + continue; + + pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n", + rport->node_name, rport->port_id); + nacl->nport_id = rport->port_id; + + spin_unlock_irqrestore(sh->host_lock, flags); + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + node = btree_lookup32(&lport->lport_fcport_map, rport->port_id); + if (node) { + rc = btree_update32(&lport->lport_fcport_map, + rport->port_id, se_nacl); + } else { + rc = btree_insert32(&lport->lport_fcport_map, + rport->port_id, se_nacl, + GFP_ATOMIC); + } + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + + if (rc) { + pr_err("Unable to insert se_nacl into fcport_map"); + WARN_ON(rc > 0); + return rc; + } + + pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n", + se_nacl, rport_wwnn, nacl->nport_id); + + return 1; + } + spin_unlock_irqrestore(sh->host_lock, flags); + + return 0; +} + +/* + * Expected to be called with struct qla_hw_data->hardware_lock held + */ +static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) +{ + struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, + struct tcm_qla2xxx_nacl, se_node_acl); + void *node; + + pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); + + node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); + WARN_ON(node && (node != se_nacl)); + + pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", + se_nacl, nacl->nport_wwnn, nacl->nport_id); +} + +static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) +{ + target_put_session(sess->se_sess); +} + +static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) +{ + tcm_qla2xxx_shutdown_session(sess->se_sess); +} + +static struct se_node_acl *tcm_qla2xxx_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_qla2xxx_nacl *nacl; + u64 wwnn; + u32 qla2xxx_nexus_depth; + int rc; + + if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) + 
+ +static struct se_node_acl *tcm_qla2xxx_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_qla2xxx_nacl *nacl; + u64 wwnn; + u32 qla2xxx_nexus_depth; + int rc; + + if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) + return ERR_PTR(-EINVAL); + + se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); +/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */ + qla2xxx_nexus_depth = 1; + + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explicit + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, qla2xxx_nexus_depth); + if (IS_ERR(se_nacl)) { + tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + nacl->nport_wwnn = wwnn; + tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); + /* + * Setup a se_nacl handle based on a matching struct fc_rport setup + * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() + */ + rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport, + nacl, wwnn); + if (rc < 0) { + tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); + return ERR_PTR(rc); + } + + return se_nacl; +} + +static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct tcm_qla2xxx_nacl *nacl = container_of(se_acl, + struct tcm_qla2xxx_nacl, se_node_acl); + + core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1); + kfree(nacl); +}
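tcm_qla2xxx_make_nodeacl() above leans on tcm_qla2xxx_parse_wwn(), whose body lies outside this hunk. A plausible sketch of such a parser, under the assumption that it accepts the colon-separated byte form emitted by tcm_qla2xxx_format_wwn() (a hypothetical stand-in, not the driver's helper):

/* Hypothetical parser for "xx:xx:xx:xx:xx:xx:xx:xx" WWNs; wwn_to_u64()
 * comes from scsi/scsi_transport_fc.h and packs the bytes big-endian. */
static int example_parse_wwn(const char *name, u64 *wwn)
{
	u8 b[8];

	if (sscanf(name, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
		   &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], &b[7]) != 8)
		return -EINVAL;
	*wwn = wwn_to_u64(b);
	return 0;
}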
+ +/* Start items for tcm_qla2xxx_tpg_attrib_cit */ + +#define DEF_QLA_TPG_ATTRIB(name) \ + \ +static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ + struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ + struct tcm_qla2xxx_tpg, se_tpg); \ + \ + return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ +} \ + \ +static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ + struct se_portal_group *se_tpg, \ + const char *page, \ + size_t count) \ +{ \ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ + struct tcm_qla2xxx_tpg, se_tpg); \ + unsigned long val; \ + int ret; \ + \ + ret = kstrtoul(page, 0, &val); \ + if (ret < 0) { \ + pr_err("kstrtoul() failed with" \ + " ret: %d\n", ret); \ + return -EINVAL; \ + } \ + ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \ + \ + return (!ret) ? count : -EINVAL; \ +} + +#define DEF_QLA_TPG_ATTR_BOOL(_name) \ + \ +static int tcm_qla2xxx_set_attrib_##_name( \ + struct tcm_qla2xxx_tpg *tpg, \ + unsigned long val) \ +{ \ + struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ + \ + if ((val != 0) && (val != 1)) { \ + pr_err("Illegal boolean value %lu\n", val); \ + return -EINVAL; \ + } \ + \ + a->_name = val; \ + return 0; \ +} + +#define QLA_TPG_ATTR(_name, _mode) \ + TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode); + +/* + * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls + */ +DEF_QLA_TPG_ATTR_BOOL(generate_node_acls); +DEF_QLA_TPG_ATTRIB(generate_node_acls); +QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR); + +/* + * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls + */ +DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls); +DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); +QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR); + +/* + * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect + */ +DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect); +DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); +QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); + +/* + * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect + */ +DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); +DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); +QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { + &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, + &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, + &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, + &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, + NULL, +}; + +/* End items for tcm_qla2xxx_tpg_attrib_cit */ + +static ssize_t tcm_qla2xxx_tpg_show_enable( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return snprintf(page, PAGE_SIZE, "%d\n", + atomic_read(&tpg->lport_tpg_enabled)); +} + +static ssize_t tcm_qla2xxx_tpg_store_enable( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + unsigned long op; + int rc; + + rc = kstrtoul(page, 0, &op); + if (rc < 0) { + pr_err("kstrtoul() returned %d\n", rc); + return -EINVAL; + } + if ((op != 1) && (op != 0)) { + pr_err("Illegal value for tpg_enable: %lu\n", op); + return -EINVAL; + } + + if (op) { + atomic_set(&tpg->lport_tpg_enabled, 1); + qlt_enable_vha(vha); + } else { + if (!ha->tgt.qla_tgt) { + pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); + return -ENODEV; + } + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(ha->tgt.qla_tgt); + } + + return count; +} + +TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { + &tcm_qla2xxx_tpg_enable.attr, + NULL, +};
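The enable attribute defined above is driven entirely from userspace through configfs. Assuming the standard /sys/kernel/config mount and the "qla2xxx" fabric name registered later in this patch (the WWPN directory name is illustrative), a userspace sketch of the sequence that lands in tcm_qla2xxx_tpg_store_enable():

/* Userspace sketch: create TPG 1 for an lport and enable it.
 * Mount point and WWPN are illustrative. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *wwn =
	    "/sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:4c:48";
	char path[256];
	FILE *f;

	mkdir(wwn, 0755);			/* tcm_qla2xxx_make_lport() */
	snprintf(path, sizeof(path), "%s/tpgt_1", wwn);
	mkdir(path, 0755);			/* tcm_qla2xxx_make_tpg() */

	snprintf(path, sizeof(path), "%s/tpgt_1/enable", wwn);
	f = fopen(path, "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1", f);				/* tcm_qla2xxx_tpg_store_enable() */
	fclose(f);
	return 0;
}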
+ +static struct se_portal_group *tcm_qla2xxx_make_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) + return ERR_PTR(-EINVAL); + + if (!lport->qla_npiv_vp && (tpgt != 1)) { + pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); + return ERR_PTR(-ENOSYS); + } + + tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); + return ERR_PTR(-ENOMEM); + } + tpg->lport = lport; + tpg->lport_tpgt = tpgt; + /* + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic + * NodeACLs + */ + QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; + QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; + QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; + + ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + /* + * Setup local TPG=1 pointer for non NPIV mode. + */ + if (lport->qla_npiv_vp == NULL) + lport->tpg_1 = tpg; + + return &tpg->se_tpg; +} + +static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + struct tcm_qla2xxx_lport *lport = tpg->lport; + struct scsi_qla_host *vha = lport->qla_vha; + struct qla_hw_data *ha = vha->hw; + /* + * Call into qla_target.c LLD logic to shut down the active + * FC Nexuses and disable target mode operation for this qla_hw_data + */ + if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) + qlt_stop_phase1(ha->tgt.qla_tgt); + + core_tpg_deregister(se_tpg); + /* + * Clear local TPG=1 pointer for non NPIV mode. + */ + if (lport->qla_npiv_vp == NULL) + lport->tpg_1 = NULL; + + kfree(tpg); +} + +static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct tcm_qla2xxx_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); + return ERR_PTR(-ENOMEM); + } + tpg->lport = lport; + tpg->lport_tpgt = tpgt; + + ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + return &tpg->se_tpg; +} + +/* + * Expected to be called with struct qla_hw_data->hardware_lock held + */ +static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( + scsi_qla_host_t *vha, + const uint8_t *s_id) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport; + struct se_node_acl *se_nacl; + struct tcm_qla2xxx_nacl *nacl; + u32 key; + + lport = ha->tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return NULL; + } + + key = (((unsigned long)s_id[0] << 16) | + ((unsigned long)s_id[1] << 8) | + (unsigned long)s_id[2]); + pr_debug("find_sess_by_s_id: 0x%06x\n", key); + + se_nacl = btree_lookup32(&lport->lport_fcport_map, key); + if (!se_nacl) { + pr_debug("Unable to locate s_id: 0x%06x\n", key); + return NULL; + } + pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", + se_nacl, se_nacl->initiatorname); + + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + if (!nacl->qla_tgt_sess) { + pr_err("Unable to locate struct qla_tgt_sess\n"); + return NULL; + } + + return nacl->qla_tgt_sess; +}
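tcm_qla2xxx_find_sess_by_s_id() packs the three S_ID bytes big-endian into the 24-bit btree key; the same open-coded shifts reappear in tcm_qla2xxx_set_sess_by_s_id() below. The computation as a stand-alone helper (a sketch, not present in the driver):

/* Equivalent of the open-coded key construction: domain, area and
 * al_pa land in bits 23..16, 15..8 and 7..0 of the btree key. */
static u32 sid_to_key(const uint8_t *s_id)
{
	return ((u32)s_id[0] << 16) | ((u32)s_id[1] << 8) | (u32)s_id[2];
}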
+ +/* + * Expected to be called with struct qla_hw_data->hardware_lock held + */ +static void tcm_qla2xxx_set_sess_by_s_id( + struct tcm_qla2xxx_lport *lport, + struct se_node_acl *new_se_nacl, + struct tcm_qla2xxx_nacl *nacl, + struct se_session *se_sess, + struct qla_tgt_sess *qla_tgt_sess, + uint8_t *s_id) +{ + u32 key; + void *slot; + int rc; + + key = (((unsigned long)s_id[0] << 16) | + ((unsigned long)s_id[1] << 8) | + (unsigned long)s_id[2]); + pr_debug("set_sess_by_s_id: %06x\n", key); + + slot = btree_lookup32(&lport->lport_fcport_map, key); + if (!slot) { + if (new_se_nacl) { + pr_debug("Setting up new fc_port entry to new_se_nacl\n"); + nacl->nport_id = key; + rc = btree_insert32(&lport->lport_fcport_map, key, + new_se_nacl, GFP_ATOMIC); + if (rc) + printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", + (int)key); + } else { + pr_debug("Wiping nonexistent fc_port entry\n"); + } + + qla_tgt_sess->se_sess = se_sess; + nacl->qla_tgt_sess = qla_tgt_sess; + return; + } + + if (nacl->qla_tgt_sess) { + if (new_se_nacl == NULL) { + pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); + btree_remove32(&lport->lport_fcport_map, key); + nacl->qla_tgt_sess = NULL; + return; + } + pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); + btree_update32(&lport->lport_fcport_map, key, new_se_nacl); + qla_tgt_sess->se_sess = se_sess; + nacl->qla_tgt_sess = qla_tgt_sess; + return; + } + + if (new_se_nacl == NULL) { + pr_debug("Clearing existing fc_port entry\n"); + btree_remove32(&lport->lport_fcport_map, key); + return; + } + + pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n"); + btree_update32(&lport->lport_fcport_map, key, new_se_nacl); + qla_tgt_sess->se_sess = se_sess; + nacl->qla_tgt_sess = qla_tgt_sess; + + pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", + nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); +} + +/* + * Expected to be called with struct qla_hw_data->hardware_lock held + */ +static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( + scsi_qla_host_t *vha, + const uint16_t loop_id) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport; + struct se_node_acl *se_nacl; + struct tcm_qla2xxx_nacl *nacl; + struct tcm_qla2xxx_fc_loopid *fc_loopid; + + lport = ha->tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return NULL; + } + + pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); + + fc_loopid = lport->lport_loopid_map + loop_id; + se_nacl = fc_loopid->se_nacl; + if (!se_nacl) { + pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", + loop_id); + return NULL; + } + + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + + if (!nacl->qla_tgt_sess) { + pr_err("Unable to locate struct qla_tgt_sess\n"); + return NULL; + } + + return nacl->qla_tgt_sess; +}
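Note the asymmetry with the port-ID side: loop IDs are dense 16-bit values, so instead of a btree the driver keeps a flat vmalloc'ed table and indexes it directly. A hypothetical accessor showing the O(1) lookup that tcm_qla2xxx_find_sess_by_loop_id() performs above:

/* Loop IDs fit in 16 bits, so lport_loopid_map is a 65536-entry
 * array; lookup is a single index rather than a tree walk. */
static struct se_node_acl *loopid_to_nacl(struct tcm_qla2xxx_lport *lport,
					  uint16_t loop_id)
{
	return lport->lport_loopid_map[loop_id].se_nacl;
}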
+ +/* + * Expected to be called with struct qla_hw_data->hardware_lock held + */ +static void tcm_qla2xxx_set_sess_by_loop_id( + struct tcm_qla2xxx_lport *lport, + struct se_node_acl *new_se_nacl, + struct tcm_qla2xxx_nacl *nacl, + struct se_session *se_sess, + struct qla_tgt_sess *qla_tgt_sess, + uint16_t loop_id) +{ + struct se_node_acl *saved_nacl; + struct tcm_qla2xxx_fc_loopid *fc_loopid; + + pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); + + fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) + lport->lport_loopid_map)[loop_id]; + + saved_nacl = fc_loopid->se_nacl; + if (!saved_nacl) { + pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); + fc_loopid->se_nacl = new_se_nacl; + if (qla_tgt_sess->se_sess != se_sess) + qla_tgt_sess->se_sess = se_sess; + if (nacl->qla_tgt_sess != qla_tgt_sess) + nacl->qla_tgt_sess = qla_tgt_sess; + return; + } + + if (nacl->qla_tgt_sess) { + if (new_se_nacl == NULL) { + pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = NULL; + nacl->qla_tgt_sess = NULL; + return; + } + + pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = new_se_nacl; + if (qla_tgt_sess->se_sess != se_sess) + qla_tgt_sess->se_sess = se_sess; + if (nacl->qla_tgt_sess != qla_tgt_sess) + nacl->qla_tgt_sess = qla_tgt_sess; + return; + } + + if (new_se_nacl == NULL) { + pr_debug("Clearing fc_loopid->se_nacl\n"); + fc_loopid->se_nacl = NULL; + return; + } + + pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); + fc_loopid->se_nacl = new_se_nacl; + if (qla_tgt_sess->se_sess != se_sess) + qla_tgt_sess->se_sess = se_sess; + if (nacl->qla_tgt_sess != qla_tgt_sess) + nacl->qla_tgt_sess = qla_tgt_sess; + + pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", + nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); +} + +static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) +{ + struct qla_tgt *tgt = sess->tgt; + struct qla_hw_data *ha = tgt->ha; + struct se_session *se_sess; + struct se_node_acl *se_nacl; + struct tcm_qla2xxx_lport *lport; + struct tcm_qla2xxx_nacl *nacl; + unsigned char be_sid[3]; + unsigned long flags; + + BUG_ON(in_interrupt()); + + se_sess = sess->se_sess; + if (!se_sess) { + pr_err("struct qla_tgt_sess->se_sess is NULL\n"); + dump_stack(); + return; + } + se_nacl = se_sess->se_node_acl; + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + + lport = ha->tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return; + } + target_wait_for_sess_cmds(se_sess, 0); + /* + * And now clear the se_nacl and session pointers from our HW lport + * mappings for fabric S_ID and LOOP_ID. + */ + memset(&be_sid, 0, 3); + be_sid[0] = sess->s_id.b.domain; + be_sid[1] = sess->s_id.b.area; + be_sid[2] = sess->s_id.b.al_pa; + + spin_lock_irqsave(&ha->hardware_lock, flags); + tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, + sess, be_sid); + tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, + sess, sess->loop_id); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + transport_deregister_session_configfs(sess->se_sess); + transport_deregister_session(sess->se_sess); +} + +/* + * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() + * to locate struct se_node_acl + */ +static int tcm_qla2xxx_check_initiator_node_acl( + scsi_qla_host_t *vha, + unsigned char *fc_wwpn, + void *qla_tgt_sess, + uint8_t *s_id, + uint16_t loop_id) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport; + struct tcm_qla2xxx_tpg *tpg; + struct tcm_qla2xxx_nacl *nacl; + struct se_portal_group *se_tpg; + struct se_node_acl *se_nacl; + struct se_session *se_sess; + struct qla_tgt_sess *sess = qla_tgt_sess; + unsigned char port_name[36]; + unsigned long flags; + + lport = ha->tgt.target_lport_ptr; + if (!lport) { + pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); + dump_stack(); + return -EINVAL; + } + /* + * Locate the TPG=1 reference.
+ */ + tpg = lport->tpg_1; + if (!tpg) { + pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n"); + return -EINVAL; + } + se_tpg = &tpg->se_tpg; + + se_sess = transport_init_session(); + if (IS_ERR(se_sess)) { + pr_err("Unable to initialize struct se_session\n"); + return PTR_ERR(se_sess); + } + /* + * Format the FCP Initiator port_name into colon separated values to + * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs. + */ + memset(&port_name, 0, 36); + snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", + fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4], + fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]); + /* + * Locate our struct se_node_acl either from an explicit NodeACL created + * via ConfigFS, or via running in TPG demo mode. + */ + se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, + port_name); + if (!se_sess->se_node_acl) { + transport_free_session(se_sess); + return -EINVAL; + } + se_nacl = se_sess->se_node_acl; + nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); + /* + * And now setup the new se_nacl and session pointers into our HW lport + * mappings for fabric S_ID and LOOP_ID. + */ + spin_lock_irqsave(&ha->hardware_lock, flags); + tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, + qla_tgt_sess, s_id); + tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, + qla_tgt_sess, loop_id); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + /* + * Finally register the new FC Nexus with TCM + */ + __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); + + return 0; +} + +/* + * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. + */ +static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { + .handle_cmd = tcm_qla2xxx_handle_cmd, + .handle_data = tcm_qla2xxx_handle_data, + .handle_tmr = tcm_qla2xxx_handle_tmr, + .free_cmd = tcm_qla2xxx_free_cmd, + .free_mcmd = tcm_qla2xxx_free_mcmd, + .free_session = tcm_qla2xxx_free_session, + .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, + .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, + .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, + .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, + .put_sess = tcm_qla2xxx_put_sess, + .shutdown_sess = tcm_qla2xxx_shutdown_sess, +}; + +static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) +{ + int rc; + + rc = btree_init32(&lport->lport_fcport_map); + if (rc) { + pr_err("Unable to initialize lport->lport_fcport_map btree\n"); + return rc; + } + + lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * + 65536); + if (!lport->lport_loopid_map) { + pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", + sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); + btree_destroy32(&lport->lport_fcport_map); + return -ENOMEM; + } + memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid) + * 65536); + pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", + sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); + return 0; +}
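On the sizing of that allocation: with one pointer per entry, the loop-ID table is 65536 * sizeof(void *), i.e. 512 KiB on 64-bit, which is why it comes from vmalloc() rather than kmalloc(). A sketch of the same allocation using vzalloc(), which folds in the zeroing (an equivalent formulation, not the patch's code):

#include <linux/vmalloc.h>

/* vzalloc() == vmalloc() + zeroing, matching the open-coded
 * vmalloc()/memset() pair in tcm_qla2xxx_init_lport() above. */
static struct tcm_qla2xxx_fc_loopid *alloc_loopid_map(void)
{
	return vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
}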
+ +static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct tcm_qla2xxx_lport *lport; + /* + * Setup local pointer to vha, NPIV VP pointer (if present) and + * vha->tcm_lport pointer + */ + lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; + lport->qla_vha = vha; + + return 0; +} + +static struct se_wwn *tcm_qla2xxx_make_lport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport; + u64 wwpn; + int ret = -ENODEV; + + if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); + if (!lport) { + pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); + return ERR_PTR(-ENOMEM); + } + lport->lport_wwpn = wwpn; + tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, + wwpn); + + ret = tcm_qla2xxx_init_lport(lport); + if (ret != 0) + goto out; + + ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, + tcm_qla2xxx_lport_register_cb, lport); + if (ret != 0) + goto out_lport; + + return &lport->lport_wwn; +out_lport: + vfree(lport->lport_loopid_map); + btree_destroy32(&lport->lport_fcport_map); +out: + kfree(lport); + return ERR_PTR(ret); +} + +static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct qla_hw_data *ha = vha->hw; + struct se_node_acl *node; + u32 key = 0; + + /* + * Call into qla_target.c LLD logic to complete the + * shutdown of struct qla_tgt after the call to + * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above. + */ + if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) + qlt_stop_phase2(ha->tgt.qla_tgt); + + qlt_lport_deregister(vha); + + vfree(lport->lport_loopid_map); + btree_for_each_safe32(&lport->lport_fcport_map, key, node) + btree_remove32(&lport->lport_fcport_map, key); + btree_destroy32(&lport->lport_fcport_map); + kfree(lport); +} + +static struct se_wwn *tcm_qla2xxx_npiv_make_lport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_qla2xxx_lport *lport; + u64 npiv_wwpn, npiv_wwnn; + int ret; + + if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, + &npiv_wwpn, &npiv_wwnn) < 0) + return ERR_PTR(-EINVAL); + + lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); + if (!lport) { + pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); + return ERR_PTR(-ENOMEM); + } + lport->lport_npiv_wwpn = npiv_wwpn; + lport->lport_npiv_wwnn = npiv_wwnn; + tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], + TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); + +/* FIXME: tcm_qla2xxx_npiv_make_lport */ + ret = -ENOSYS; + if (ret != 0) + goto out; + + return &lport->lport_wwn; +out: + kfree(lport); + return ERR_PTR(ret); +} + +static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) +{ + struct tcm_qla2xxx_lport *lport = container_of(wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct Scsi_Host *sh = vha->host; + /* + * Notify libfc that we want to release the lport->npiv_vport + */ + fc_vport_terminate(lport->npiv_vport); + + scsi_host_put(sh); + kfree(lport); +} + + +static ssize_t tcm_qla2xxx_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, + "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " + UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine); +} + +TF_WWN_ATTR_RO(tcm_qla2xxx, version); + +static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { + &tcm_qla2xxx_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops tcm_qla2xxx_ops = { + .get_fabric_name = tcm_qla2xxx_get_fabric_name, + .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, + .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, + .tpg_get_tag = tcm_qla2xxx_get_tag, + .tpg_get_default_depth =
tcm_qla2xxx_get_default_depth, + .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = + tcm_qla2xxx_check_demo_write_protect, + .tpg_check_prod_mode_write_protect = + tcm_qla2xxx_check_prod_write_protect, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, + .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .new_cmd_map = NULL, + .check_stop_free = tcm_qla2xxx_check_stop_free, + .release_cmd = tcm_qla2xxx_release_cmd, + .shutdown_session = tcm_qla2xxx_shutdown_session, + .close_session = tcm_qla2xxx_close_session, + .sess_get_index = tcm_qla2xxx_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_qla2xxx_write_pending, + .write_pending_status = tcm_qla2xxx_write_pending_status, + .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, + .get_task_tag = tcm_qla2xxx_get_task_tag, + .get_cmd_state = tcm_qla2xxx_get_cmd_state, + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len, + .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_qla2xxx_make_lport, + .fabric_drop_wwn = tcm_qla2xxx_drop_lport, + .fabric_make_tpg = tcm_qla2xxx_make_tpg, + .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, + .fabric_post_link = NULL, + .fabric_pre_unlink = NULL, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, + .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, +}; + +static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { + .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, + .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, + .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, + .tpg_get_tag = tcm_qla2xxx_get_tag, + .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, + .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_qla2xxx_check_false, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, + .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, + .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, + .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .release_cmd = tcm_qla2xxx_release_cmd, + .shutdown_session = tcm_qla2xxx_shutdown_session, + .close_session = tcm_qla2xxx_close_session, + .sess_get_index = tcm_qla2xxx_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_qla2xxx_write_pending, + .write_pending_status = tcm_qla2xxx_write_pending_status, + .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, + .get_task_tag = tcm_qla2xxx_get_task_tag, + .get_cmd_state = tcm_qla2xxx_get_cmd_state, + .queue_data_in = 
tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len, + .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, + .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, + .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, + .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, + .fabric_post_link = NULL, + .fabric_pre_unlink = NULL, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, + .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, +}; + +static int tcm_qla2xxx_register_configfs(void) +{ + struct target_fabric_configfs *fabric, *npiv_fabric; + int ret; + + pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " + UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + utsname()->machine); + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx"); + if (IS_ERR(fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + return PTR_ERR(fabric); + } + /* + * Setup fabric->tf_ops from our local tcm_qla2xxx_ops + */ + fabric->tf_ops = tcm_qla2xxx_ops; + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = + tcm_qla2xxx_tpg_attrib_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); + return ret; + } + /* + * Setup our local pointer to *fabric + */ + tcm_qla2xxx_fabric_configfs = fabric; + pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n"); + + /* + * Register the top level struct config_item_type for NPIV with TCM core + */ + npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv"); + if (IS_ERR(npiv_fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + ret = PTR_ERR(npiv_fabric); + goto out_fabric; + } + /* + * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops + */ + npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops; + /* + * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the npiv_fabric for use within TCM + */ + ret = 
target_fabric_configfs_register(npiv_fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); + goto out_fabric; + } + /* + * Setup our local pointer to *npiv_fabric + */ + tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric; + pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n"); + + tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", + WQ_MEM_RECLAIM, 0); + if (!tcm_qla2xxx_free_wq) { + ret = -ENOMEM; + goto out_fabric_npiv; + } + + tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0); + if (!tcm_qla2xxx_cmd_wq) { + ret = -ENOMEM; + goto out_free_wq; + } + + return 0; + +out_free_wq: + destroy_workqueue(tcm_qla2xxx_free_wq); +out_fabric_npiv: + target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); +out_fabric: + target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); + return ret; +} + +static void tcm_qla2xxx_deregister_configfs(void) +{ + destroy_workqueue(tcm_qla2xxx_cmd_wq); + destroy_workqueue(tcm_qla2xxx_free_wq); + + target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); + tcm_qla2xxx_fabric_configfs = NULL; + pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n"); + + target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); + tcm_qla2xxx_npiv_fabric_configfs = NULL; + pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n"); +} + +static int __init tcm_qla2xxx_init(void) +{ + int ret; + + ret = tcm_qla2xxx_register_configfs(); + if (ret < 0) + return ret; + + return 0; +} + +static void __exit tcm_qla2xxx_exit(void) +{ + tcm_qla2xxx_deregister_configfs(); +} + +MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_qla2xxx_init); +module_exit(tcm_qla2xxx_exit); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h new file mode 100644 index 000000000000..825498103352 --- /dev/null +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -0,0 +1,82 @@ +#include <target/target_core_base.h> +#include <linux/btree.h> + +#define TCM_QLA2XXX_VERSION "v0.1" +/* length of ASCII WWPNs including pad */ +#define TCM_QLA2XXX_NAMELEN 32 +/* length of ASCII NPIV 'WWPN+WWNN' including pad */ +#define TCM_QLA2XXX_NPIV_NAMELEN 66 + +#include "qla_target.h" + +struct tcm_qla2xxx_nacl { + /* From libfc struct fc_rport->port_id */ + u32 nport_id; + /* Binary World Wide unique Node Name for remote FC Initiator Nport */ + u64 nport_wwnn; + /* ASCII formatted WWPN for FC Initiator Nport */ + char nport_name[TCM_QLA2XXX_NAMELEN]; + /* Pointer to qla_tgt_sess */ + struct qla_tgt_sess *qla_tgt_sess; + /* Pointer to TCM FC nexus */ + struct se_session *nport_nexus; + /* Returned by tcm_qla2xxx_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct tcm_qla2xxx_tpg_attrib { + int generate_node_acls; + int cache_dynamic_acls; + int demo_mode_write_protect; + int prod_mode_write_protect; +}; + +struct tcm_qla2xxx_tpg { + /* FC lport target portal group tag for TCM */ + u16 lport_tpgt; + /* Atomic bit to determine TPG active status */ + atomic_t lport_tpg_enabled; + /* Pointer back to tcm_qla2xxx_lport */ + struct tcm_qla2xxx_lport *lport; + /* Used by tcm_qla2xxx_tpg_attrib_cit */ + struct tcm_qla2xxx_tpg_attrib tpg_attrib; + /* Returned by tcm_qla2xxx_make_tpg() */ + struct se_portal_group se_tpg; +}; + +#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) + +struct tcm_qla2xxx_fc_loopid { + struct se_node_acl *se_nacl; +}; + +struct tcm_qla2xxx_lport { + /* SCSI protocol the lport is
providing */ + u8 lport_proto_id; + /* Binary World Wide unique Port Name for FC Target Lport */ + u64 lport_wwpn; + /* Binary World Wide unique Port Name for FC NPIV Target Lport */ + u64 lport_npiv_wwpn; + /* Binary World Wide unique Node Name for FC NPIV Target Lport */ + u64 lport_npiv_wwnn; + /* ASCII formatted WWPN for FC Target Lport */ + char lport_name[TCM_QLA2XXX_NAMELEN]; + /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ + char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; + /* map for fc_port pointers in 24-bit FC Port ID space */ + struct btree_head32 lport_fcport_map; + /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */ + struct tcm_qla2xxx_fc_loopid *lport_loopid_map; + /* Pointer to struct scsi_qla_host from qla2xxx LLD */ + struct scsi_qla_host *qla_vha; + /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ + struct scsi_qla_host *qla_npiv_vp; + /* Embedded struct qla_tgt for this lport */ + struct qla_tgt lport_qla_tgt; + /* Pointer to struct fc_vport for NPIV vport from libfc */ + struct fc_vport *npiv_vport; + /* Pointer to TPG=1 for non NPIV mode */ + struct tcm_qla2xxx_tpg *tpg_1; + /* Returned by tcm_qla2xxx_make_lport() */ + struct se_wwn lport_wwn; +}; diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c index 0b0a7d42137d..c681b2a355e1 100644 --- a/drivers/scsi/qla4xxx/ql4_attr.c +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -9,6 +9,140 @@ #include "ql4_glbl.h" #include "ql4_dbg.h" +static ssize_t +qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + + if (!is_qla8022(ha)) + return -EINVAL; + + if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) + return 0; + + return memory_read_from_buffer(buf, count, &off, ha->fw_dump, + ha->fw_dump_size); +} + +static ssize_t +qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, + struct bin_attribute *ba, char *buf, loff_t off, + size_t count) +{ + struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, + struct device, kobj))); + uint32_t dev_state; + long reading; + int ret = 0; + + if (!is_qla8022(ha)) + return -EINVAL; + + if (off != 0) + return ret; + + buf[1] = 0; + ret = kstrtol(buf, 10, &reading); + if (ret) { + ql4_printk(KERN_ERR, ha, "%s: Invalid input:
err %d\n", + __func__, ret); + return ret; + } + + switch (reading) { + case 0: + /* clear dump collection flags */ + if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) { + clear_bit(AF_82XX_FW_DUMPED, &ha->flags); + /* Reload minidump template */ + qla4xxx_alloc_fw_dump(ha); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Firmware template reloaded\n")); + } + break; + case 1: + /* Set flag to read dump */ + if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) && + !test_bit(AF_82XX_DUMP_READING, &ha->flags)) { + set_bit(AF_82XX_DUMP_READING, &ha->flags); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Raw firmware dump ready for read on (%ld).\n", + ha->host_no)); + } + break; + case 2: + /* Reset HBA */ + qla4_8xxx_idc_lock(ha); + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (dev_state == QLA82XX_DEV_READY) { + ql4_printk(KERN_INFO, ha, + "%s: Setting Need reset, reset_owner is 0x%x.\n", + __func__, ha->func_num); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_NEED_RESET); + set_bit(AF_82XX_RST_OWNER, &ha->flags); + } else + ql4_printk(KERN_INFO, ha, + "%s: Reset not performed as device state is 0x%x\n", + __func__, dev_state); + + qla4_8xxx_idc_unlock(ha); + break; + default: + /* do nothing */ + break; + } + + return count; +} + +static struct bin_attribute sysfs_fw_dump_attr = { + .attr = { + .name = "fw_dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla4_8xxx_sysfs_read_fw_dump, + .write = qla4_8xxx_sysfs_write_fw_dump, +}; + +static struct sysfs_entry { + char *name; + struct bin_attribute *attr; +} bin_file_entries[] = { + { "fw_dump", &sysfs_fw_dump_attr }, + { NULL }, +}; + +void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha) +{ + struct Scsi_Host *host = ha->host; + struct sysfs_entry *iter; + int ret; + + for (iter = bin_file_entries; iter->name; iter++) { + ret = sysfs_create_bin_file(&host->shost_gendev.kobj, + iter->attr); + if (ret) + ql4_printk(KERN_ERR, ha, + "Unable to create sysfs %s binary attribute (%d).\n", + iter->name, ret); + } +} + +void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha) +{ + struct Scsi_Host *host = ha->host; + struct sysfs_entry *iter; + + for (iter = bin_file_entries; iter->name; iter++) + sysfs_remove_bin_file(&host->shost_gendev.kobj, + iter->attr); +}
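Putting the pieces together, the fw_dump protocol implemented above is: write '1' to arm a captured dump for reading, read out the raw minidump, then write '0' to clear the flags and reload the template (writing '2' instead requests an HBA reset). A userspace sketch; the sysfs path is illustrative, the attribute hangs off the host's device kobject:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *p = "/sys/class/scsi_host/host0/device/fw_dump";
	char buf[4096];
	off_t off = 0;
	ssize_t n;
	int fd = open(p, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pwrite(fd, "1", 1, 0);			/* arm dump for reading */
	while ((n = pread(fd, buf, sizeof(buf), off)) > 0) {
		fwrite(buf, 1, n, stdout);	/* raw minidump to stdout */
		off += n;
	}
	pwrite(fd, "0", 1, 0);			/* clear flags, reload template */
	close(fd);
	return 0;
}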
/* Scsi_Host attributes. */ static ssize_t qla4xxx_fw_version_show(struct device *dev, diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 7f2492e88be7..96a5616a8fda 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -398,6 +398,16 @@ struct isp_operations { int (*get_sys_info) (struct scsi_qla_host *); }; +struct ql4_mdump_size_table { + uint32_t size; + uint32_t size_cmask_02; + uint32_t size_cmask_04; + uint32_t size_cmask_08; + uint32_t size_cmask_10; + uint32_t size_cmask_FF; + uint32_t version; +}; + /* qla4xxx IP address configuration details */ struct ipaddress_config { uint16_t ipv4_options; @@ -485,6 +495,10 @@ struct scsi_qla_host { #define AF_EEH_BUSY 20 /* 0x00100000 */ #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ #define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ +#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ +#define AF_82XX_RST_OWNER 25 /* 0x02000000 */ +#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ + unsigned long dpc_flags; #define DPC_RESET_HA 1 /* 0x00000002 */ @@ -662,6 +676,11 @@ struct scsi_qla_host { uint32_t nx_dev_init_timeout; uint32_t nx_reset_timeout; + void *fw_dump; + uint32_t fw_dump_size; + uint32_t fw_dump_capture_mask; + void *fw_dump_tmplt_hdr; + uint32_t fw_dump_tmplt_size; struct completion mbx_intr_comp; @@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha) #define PROCESS_ALL_AENS 0 #define FLUSH_DDB_CHANGED_AENS 1 +/* Defines for udev events */ +#define QL4_UEVENT_CODE_FW_DUMP 0 + #endif /*_QLA4XXX_H */ diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 210cd1d64475..7240948fb929 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -385,6 +385,11 @@ struct qla_flt_region { #define MBOX_CMD_GET_IP_ADDR_STATE 0x0091 #define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092 #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093 +#define MBOX_CMD_MINIDUMP 0x0129 + +/* Minidump subcommand */ +#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00 +#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01 /* Mailbox 1 */ #define FW_STATE_READY 0x0000 @@ -1190,4 +1195,27 @@ struct ql_iscsi_stats { uint8_t reserved2[264]; /* 0x0308 - 0x040F */ }; +#define QLA82XX_DBG_STATE_ARRAY_LEN 16 +#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 + +struct qla4_8xxx_minidump_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info_word2; + uint32_t driver_info_word3; + uint32_t driver_info_word4; + + uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; +}; + #endif /* _QLA4X_FW_H */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 910536667cf5..20b49d019043 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job); int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry); +int qla4xxx_get_minidump_template(struct scsi_qla_host *ha, + dma_addr_t phys_addr); +int qla4xxx_req_template_size(struct scsi_qla_host *ha); +void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha); +void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha); +void qla4xxx_alloc_fw_dump(struct
scsi_qla_host *ha); extern int ql4xextended_error_logging; extern int ql4xdontresethba; extern int ql4xenablemsix; +extern int ql4xmdcapmask; +extern int ql4xenablemd; extern struct device_attribute *qla4xxx_host_attrs[]; #endif /* _QLA4x_GBL_H */ diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 90ee5d8fa731..bf36723b84e1 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) return ipv4_wait|ipv6_wait; } +/** + * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. + * @ha: pointer to host adapter structure. + **/ +void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) +{ + int status; + uint32_t capture_debug_level; + int hdr_entry_bit, k; + void *md_tmp; + dma_addr_t md_tmp_dma; + struct qla4_8xxx_minidump_template_hdr *md_hdr; + + if (ha->fw_dump) { + ql4_printk(KERN_WARNING, ha, + "Firmware dump previously allocated.\n"); + return; + } + + status = qla4xxx_req_template_size(ha); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to get template size\n", + ha->host_no); + return; + } + + clear_bit(AF_82XX_FW_DUMPED, &ha->flags); + + /* Allocate memory for saving the template */ + md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, + &md_tmp_dma, GFP_KERNEL); + + /* Request template */ + status = qla4xxx_get_minidump_template(ha, md_tmp_dma); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to get minidump template\n", + ha->host_no); + goto alloc_cleanup; + } + + md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; + + capture_debug_level = md_hdr->capture_debug_level; + + /* Get capture mask based on module loadtime setting. */ + if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) + ha->fw_dump_capture_mask = ql4xmdcapmask; + else + ha->fw_dump_capture_mask = capture_debug_level; + + md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n", + md_hdr->num_of_entries)); + DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n", + ha->fw_dump_tmplt_size)); + DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n", + ha->fw_dump_capture_mask)); + + /* Calculate fw_dump_size */ + for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF); + hdr_entry_bit <<= 1, k++) { + if (hdr_entry_bit & ha->fw_dump_capture_mask) + ha->fw_dump_size += md_hdr->capture_size_array[k]; + } + + /* Total firmware dump size including command header */ + ha->fw_dump_size += ha->fw_dump_tmplt_size; + ha->fw_dump = vmalloc(ha->fw_dump_size); + if (!ha->fw_dump) + goto alloc_cleanup; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Minidump Template Size = 0x%x KB\n", + ha->fw_dump_tmplt_size)); + DEBUG2(ql4_printk(KERN_INFO, ha, + "Total Minidump size = 0x%x KB\n", ha->fw_dump_size)); + + memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size); + ha->fw_dump_tmplt_hdr = ha->fw_dump; + +alloc_cleanup: + dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, + md_tmp, md_tmp_dma); +}
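The size computation above walks capture-mask bits 0x02 through 0x80, adding capture_size_array[k] for each set bit (k is the bit position, starting at 1 for bit 0x02), then counts the template itself. The same rule as a stand-alone sketch:

/* Sketch of the fw_dump sizing rule used in qla4xxx_alloc_fw_dump():
 * one capture_size_array[] entry per set bit in the capture mask,
 * plus the template that is copied in front of the dump data. */
static uint32_t md_dump_size(struct qla4_8xxx_minidump_template_hdr *hdr,
			     uint32_t capture_mask, uint32_t tmplt_size)
{
	uint32_t size = tmplt_size;
	int bit, k;

	for (bit = 0x2, k = 1; bit & 0xFF; bit <<= 1, k++)
		if (bit & capture_mask)
			size += hdr->capture_size_array[k];
	return size;
}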
+ static int qla4xxx_fw_ready(struct scsi_qla_host *ha) { uint32_t timeout_count; @@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha) "control block\n", ha->host_no, __func__)); return status; } + if (!qla4xxx_fw_ready(ha)) return status; + if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags)) + qla4xxx_alloc_fw_dump(ha); + return qla4xxx_get_firmware_status(ha); } @@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, switch (state) { case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: - ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_param(ha, ddb_entry); + ddb_entry->unblock_sess(ddb_entry->sess); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: @@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, } break; case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: switch (state) { case DDB_DS_SESSION_FAILED: /* diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 7ac21dabbf22..cab8f665a41f 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, } } - if (is_qla8022(ha)) { - if (test_bit(AF_FW_RECOVERY, &ha->flags)) { - DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " - "prematurely completing mbx cmd as firmware " - "recovery detected\n", ha->host_no, __func__)); - return status; - } - /* Do not send any mbx cmd if h/w is in failed state*/ - qla4_8xxx_idc_lock(ha); - dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - qla4_8xxx_idc_unlock(ha); - if (dev_state == QLA82XX_DEV_FAILED) { - ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " - "failed state, do not send any mailbox commands\n", - ha->host_no, __func__); - return status; - } - } - if ((is_aer_supported(ha)) && (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " @@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, msleep(10); } + if (is_qla8022(ha)) { + if (test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", + ha->host_no, __func__)); + goto mbox_exit; + } + /* Do not send any mbx cmd if h/w is in failed state */ + qla4_8xxx_idc_lock(ha); + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + qla4_8xxx_idc_unlock(ha); + if (dev_state == QLA82XX_DEV_FAILED) { + ql4_printk(KERN_WARNING, ha, + "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", + ha->host_no, __func__); + goto mbox_exit; + } + } + spin_lock_irqsave(&ha->hardware_lock, flags); ha->mbox_status_count = outCount; @@ -270,6 +270,79 @@ mbox_exit: return status; } +/** + * qla4xxx_get_minidump_template - Get the firmware template + * @ha: Pointer to host adapter structure. + * @phys_addr: dma address for template + * + * Obtain the minidump template from firmware during initialization + * as it may not be available when minidump is desired.
+ **/ +int qla4xxx_get_minidump_template(struct scsi_qla_host *ha, + dma_addr_t phys_addr) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_MINIDUMP; + mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND; + mbox_cmd[2] = LSDW(phys_addr); + mbox_cmd[3] = MSDW(phys_addr); + mbox_cmd[4] = ha->fw_dump_tmplt_size; + mbox_cmd[5] = 0; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n", + ha->host_no, __func__, mbox_cmd[0], + mbox_sts[0], mbox_sts[1])); + } + return status; +} + +/** + * qla4xxx_req_template_size - Get minidump template size from firmware. + * @ha: Pointer to host adapter structure. + **/ +int qla4xxx_req_template_size(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_MINIDUMP; + mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + if (status == QLA_SUCCESS) { + ha->fw_dump_tmplt_size = mbox_sts[1]; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n", + __func__, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5], mbox_sts[6], mbox_sts[7])); + if (ha->fw_dump_tmplt_size == 0) + status = QLA_ERROR; + } else { + ql4_printk(KERN_WARNING, ha, + "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n", + __func__, mbox_sts[0], mbox_sts[1]); + status = QLA_ERROR; + } + + return status; +} + void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) { set_bit(AF_FW_RECOVERY, &ha->flags); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index e1e46b6dac75..228b67020d2c 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -7,6 +7,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/pci.h> +#include <linux/ratelimit.h> #include "ql4_def.h" #include "ql4_glbl.h" @@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off) return data; } +/* Minidump related functions */ +static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off, + u32 data, uint8_t flag) +{ + uint32_t win_read, off_value, rval = QLA_SUCCESS; + + off_value = off & 0xFFFF0000; + writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + + /* Read back value to make sure write has gone through before trying + * to use it. 
+ */ + win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + if (win_read != off_value) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", + __func__, off_value, win_read, off)); + return QLA_ERROR; + } + + off_value = off & 0x0000FFFF; + + if (flag) + writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase)); + else + rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase)); + + return rval; +} + #define CRB_WIN_LOCK_TIMEOUT 100000000 int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) @@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha, } if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - ql4_printk(KERN_ERR, ha, - "failed to read through agent\n"); + printk_ratelimited(KERN_ERR + "%s: failed to read through agent\n", + __func__); break; } @@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) ql4_printk(KERN_ERR, ha, - "failed to write through agent\n"); + "%s: failed to write through agent\n", + __func__); ret = -1; break; } @@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active |= (1 << (ha->func_num * 4)); + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", + __func__, ha->host_no, drv_active); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } @@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active &= ~(1 << (ha->func_num * 4)); + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", + __func__, ha->host_no, drv_active); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } @@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_state |= (1 << (ha->func_num * 4)); + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", + __func__, ha->host_no, drv_state); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } @@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_state &= ~(1 << (ha->func_num * 4)); + ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", + __func__, ha->host_no, drv_state); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } @@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha) qla4_8xxx_rom_unlock(ha); } +static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_minidump_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_addr); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time,
p_mask; + uint32_t c_value_w, c_value_r; + struct qla82xx_minidump_entry_cache *cache_hdr; + int rval = QLA_ERROR; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; + + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); + + if (c_value_w) + qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + c_value_r = qla4_8xxx_md_rw_32(ha, c_addr, + 0, 0); + if ((c_value_r & p_mask) == 0) { + break; + } else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr) +{ + struct qla82xx_minidump_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS; + uint32_t crb_addr; + unsigned long wtime; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; + int i; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr; + + crb_addr = crb_entry->addr; + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla4_8xxx_md_rw_32(ha, crb_addr, + crb_entry->value_1, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + if (opcode & QLA82XX_DBG_OPCODE_RW) { + read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); + qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + if (opcode & QLA82XX_DBG_OPCODE_AND) { + read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); + } + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); + read_value |= crb_entry->value_3; + qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); + + do { + if ((read_value & crb_entry->value_2) == + crb_entry->value_1) + break; + else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_ERROR; + break; + } else + read_value = qla4_8xxx_md_rw_32(ha, + crb_addr, 0, 0); + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = 
crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else { + read_value = crb_entry->value_1; + } + + qla4_8xxx_md_rw_32(ha, addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); + return rval; +} + +static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_minidump_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, r_stride, loop_cnt)); + + for (i = 0; i < loop_cnt; i++) { + r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n", + __func__, (loop_cnt * sizeof(uint32_t)))); + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla82xx_minidump_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1); + r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla82xx_minidump_entry_cache *cache_hdr; + uint32_t *data_ptr = 
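+ /* L1 capture follows the same tag/control/read sequence as the L2 path above, minus the completion poll. */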
*d_ptr; + + cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); + qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla82xx_minidump_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla4_8xxx_md_rw_32(ha, s_addr, qid, 1); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 + +static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla82xx_minidump_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; + loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: flash_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, rom_hdr->read_data_size, loop_cnt)); + + for (i = 0; i < loop_cnt; i++) { + qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, + (r_addr & 0xFFFF0000), 1); + r_value = qla4_8xxx_md_rw_32(ha, + MD_DIRECT_ROM_READ_BASE + + (r_addr & 0x0000FFFF), 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += sizeof(uint32_t); + } + *d_ptr = data_ptr; +} + +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla82xx_minidump_entry_rdmem *m_hdr; + unsigned long flags; + uint32_t *data_ptr = *d_ptr; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size)); + + if (r_addr & 0xf) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read addr 0x%x not 16 bytes aligned\n", + __func__, r_addr)); + return QLA_ERROR; + } + + if (m_hdr->read_data_size % 16) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: Read data[0x%x] not multiple of 16 bytes\n", + __func__, m_hdr->read_data_size)); + return QLA_ERROR; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt)); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); + r_value = 0; + qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); + r_value = MIU_TA_CTL_ENABLE; + qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; + qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, + 0, 0); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "%s: failed to read through agent\n", + __func__); + write_unlock_irqrestore(&ha->hw_lock, flags); + return QLA_ERROR; + } + + for (j = 0; j < 4; j++) { + r_data = qla4_8xxx_md_rw_32(ha, + MD_MIU_TEST_AGT_RDDATA[j], + 0, 0); + *data_ptr++ = cpu_to_le32(r_data); + } + + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n", + __func__, (loop_cnt * 16))); + + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, + struct qla82xx_minidump_entry_hdr *entry_hdr, + int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", + ha->host_no, index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask)); +} + +/** + * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
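+ * + * Walks the template entry headers one by one: entries whose capture mask does not match the requested mask are flagged as skipped, the rest are dispatched on entry_type and their captured data is appended after the template header in ha->fw_dump.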
+ * @ha: pointer to adapter structure + **/ +static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) +{ + int num_entry_hdr = 0; + struct qla82xx_minidump_entry_hdr *entry_hdr; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t data_collected = 0; + int i, rval = QLA_ERROR; + uint64_t now; + uint32_t timestamp; + + if (!ha->fw_dump) { + ql4_printk(KERN_INFO, ha, "%s(%ld): No buffer to dump\n", + __func__, ha->host_no); + return rval; + } + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump + + ha->fw_dump_tmplt_size); + data_collected += ha->fw_dump_tmplt_size; + + num_entry_hdr = tmplt_hdr->num_of_entries; + ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n", + __func__, data_ptr); + ql4_printk(KERN_INFO, ha, + "[%s]: number of entry headers in template: 0x%x\n", + __func__, num_entry_hdr); + ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n", + __func__, ha->fw_dump_capture_mask); + ql4_printk(KERN_INFO, ha, "[%s]: Total data size: 0x%x (%d bytes)\n", + __func__, ha->fw_dump_size, ha->fw_dump_size); + + /* Update current timestamp before taking dump */ + now = get_jiffies_64(); + timestamp = (u32)(jiffies_to_msecs(now) / 1000); + tmplt_hdr->driver_timestamp = timestamp; + + entry_hdr = (struct qla82xx_minidump_entry_hdr *) + (((uint8_t *)ha->fw_dump_tmplt_hdr) + + tmplt_hdr->first_entry_offset); + + /* Walk through the entry headers - validate/perform required action */ + for (i = 0; i < num_entry_hdr; i++) { + if (data_collected >= ha->fw_dump_size) { + ql4_printk(KERN_INFO, ha, + "Data collected: [0x%x], Total Dump size: [0x%x]\n", + data_collected, ha->fw_dump_size); + return rval; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ha->fw_dump_capture_mask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + goto skip_nxt_entry; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, + (ha->fw_dump_size - data_collected))); + + /* Decode the entry type and take required action to capture + * debug data + */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla4_8xxx_minidump_process_control(ha, + entry_hdr); + if (rval != QLA_SUCCESS) { + ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, + &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) { + ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + qla4_8xxx_minidump_process_rdrom(ha, entry_hdr, + &data_ptr); + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) { + ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, + &data_ptr); + break; + case QLA82XX_RDOCM: + qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, + &data_ptr); + break; + case QLA82XX_RDMUX: + qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, + &data_ptr); + break; + case QLA82XX_QUEUE: + qla4_8xxx_minidump_process_queue(ha, entry_hdr, + &data_ptr); + break;
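+ /* RDNOP and any unrecognized entry types are recorded as skipped rather than failing the whole dump. */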
+ case QLA82XX_RDNOP: + default: + ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + } + + data_collected = (uint8_t *)data_ptr - + ((uint8_t *)ha->fw_dump + + ha->fw_dump_tmplt_size); +skip_nxt_entry: + /* next entry in the template */ + entry_hdr = (struct qla82xx_minidump_entry_hdr *) + (((uint8_t *)entry_hdr) + + entry_hdr->entry_size); + } + + if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) { + ql4_printk(KERN_INFO, ha, + "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", + data_collected, ha->fw_dump_size); + goto md_failed; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n", + __func__, i)); +md_failed: + return rval; +} + +/** + * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready. + * @ha: pointer to adapter structure + * @code: uevent code to emit + **/ +static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code) +{ + char event_string[40] = ""; + char *envp[] = { event_string, NULL }; + + switch (code) { + case QL4_UEVENT_CODE_FW_DUMP: + snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", + ha->host_no); + break; + default: + /* do nothing */ + break; + } + + kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp); +} + /** * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw * @ha: pointer to adapter structure @@ -1659,6 +2324,15 @@ dev_initialize: qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); qla4_8xxx_idc_unlock(ha); + if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) && + !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) { + if (!qla4_8xxx_collect_md_data(ha)) { + qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP); + } else { + ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n"); + clear_bit(AF_82XX_FW_DUMPED, &ha->flags); + } + } rval = qla4_8xxx_try_start_fw(ha); qla4_8xxx_idc_lock(ha); @@ -1686,6 +2360,7 @@ static void qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) { uint32_t dev_state, drv_state, drv_active; + uint32_t active_mask = 0xFFFFFFFF; unsigned long reset_timeout; ql4_printk(KERN_INFO, ha, @@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) qla4_8xxx_idc_lock(ha); } - qla4_8xxx_set_rst_ready(ha); + if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s(%ld): reset acknowledged\n", + __func__, ha->host_no)); + qla4_8xxx_set_rst_ready(ha); + } else { + active_mask = (~(1 << (ha->func_num * 4))); + } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); @@ -1709,12 +2391,24 @@ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", __func__, ha->host_no, drv_state, drv_active); - while (drv_state != drv_active) { + while (drv_state != (drv_active & active_mask)) { if (time_after_eq(jiffies, reset_timeout)) { - printk("%s: RESET TIMEOUT!\n", DRIVER_NAME); + ql4_printk(KERN_INFO, ha, + "%s: RESET TIMEOUT!
drv_state: 0x%08x, drv_active: 0x%08x\n", + DRIVER_NAME, drv_state, drv_active); break; } + /* + * When reset_owner times out, check which functions + * acked/did not ack + */ + if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) { + ql4_printk(KERN_INFO, ha, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", + __func__, ha->host_no, drv_state, + drv_active); + } qla4_8xxx_idc_unlock(ha); msleep(1000); qla4_8xxx_idc_lock(ha); @@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } + /* Clear RESET OWNER as we are not going to use it any further */ + clear_bit(AF_82XX_RST_OWNER, &ha->flags); + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, + dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA82XX_DEV_INITIALIZING) { ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); + qla4_8xxx_set_rst_ready(ha); } } @@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) } dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? + qdev_state[dev_state] : "Unknown")); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); @@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { - ql4_printk(KERN_WARNING, ha, "Device init failed!\n"); + ql4_printk(KERN_WARNING, ha, + "%s: Device Init Failed 0x%x = %s\n", + DRIVER_NAME, + dev_state, dev_state < MAX_STATES ? + qdev_state[dev_state] : "Unknown"); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); } dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - ql4_printk(KERN_INFO, ha, - "2:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? 
+ qdev_state[dev_state] : "Unknown"); /* NOTE: Make sure idc unlocked upon exit of switch statement */ switch (dev_state) { @@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha) ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_NEED_RESET); + set_bit(AF_82XX_RST_OWNER, &ha->flags); } else ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); @@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha) qla4_8xxx_clear_rst_ready(ha); qla4_8xxx_idc_unlock(ha); - if (rval == QLA_SUCCESS) + if (rval == QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n"); clear_bit(AF_FW_RECOVERY, &ha->flags); + } return rval; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index dc7500e47b8b..30258479f100 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -792,4 +792,196 @@ struct crb_addr_pair { #define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) #define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) +/* Minidump related */ + +/* Entry Type Defines */ +#define QLA82XX_RDNOP 0 +#define QLA82XX_RDCRB 1 +#define QLA82XX_RDMUX 2 +#define QLA82XX_QUEUE 3 +#define QLA82XX_BOARD 4 +#define QLA82XX_RDOCM 6 +#define QLA82XX_PREGS 7 +#define QLA82XX_L1DTG 8 +#define QLA82XX_L1ITG 9 +#define QLA82XX_L1DAT 11 +#define QLA82XX_L1INS 12 +#define QLA82XX_L2DTG 21 +#define QLA82XX_L2ITG 22 +#define QLA82XX_L2DAT 23 +#define QLA82XX_L2INS 24 +#define QLA82XX_RDROM 71 +#define QLA82XX_RDMEM 72 +#define QLA82XX_CNTRL 98 +#define QLA82XX_RDEND 255 + +/* Opcodes for Control Entries. + * These Flags are bit fields. + */ +#define QLA82XX_DBG_OPCODE_WR 0x01 +#define QLA82XX_DBG_OPCODE_RW 0x02 +#define QLA82XX_DBG_OPCODE_AND 0x04 +#define QLA82XX_DBG_OPCODE_OR 0x08 +#define QLA82XX_DBG_OPCODE_POLL 0x10 +#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 +#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 +#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 + +/* Driver Flags */ +#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ +#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size + * mismatch */ + +/* Driver_code is for driver to write some info about the entry + * currently not used. 
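+ * The entry_capture_mask byte below is matched against the mask the driver requests (ql4xmdcapmask, 0x3 through 0x7F), and driver_flags is where QLA82XX_DBG_SKIPPED_FLAG / QLA82XX_DBG_SIZE_ERR_FLAG end up.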
+ */ +struct qla82xx_minidump_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +}; + +/* Read CRB entry header */ +struct qla82xx_minidump_entry_crb { + struct qla82xx_minidump_entry_hdr h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +}; + +struct qla82xx_minidump_entry_cache { + struct qla82xx_minidump_entry_hdr h; + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + uint32_t data_size; + uint32_t op_count; + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +}; + +/* Read OCM */ +struct qla82xx_minidump_entry_rdocm { + struct qla82xx_minidump_entry_hdr h; + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; +}; + +/* Read Memory */ +struct qla82xx_minidump_entry_rdmem { + struct qla82xx_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Read ROM */ +struct qla82xx_minidump_entry_rdrom { + struct qla82xx_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Mux entry */ +struct qla82xx_minidump_entry_mux { + struct qla82xx_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +}; + +/* Queue entry */ +struct qla82xx_minidump_entry_queue { + struct qla82xx_minidump_entry_hdr h; + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +}; + +#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024) +#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024) +#define QLA82XX_MINIDUMP_L2C_SIZE 1572864 +#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0 +#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0 +#define QLA82XX_MINIDUMP_MEM_SIZE 0 +#define QLA82XX_MAX_ENTRY_HDR 4 + +struct qla82xx_minidump { + uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE]; + uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE]; + uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE]; + uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE]; + uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE]; + uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE]; +}; + +#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 +#define RQST_TMPLT_SIZE 0x0 +#define RQST_TMPLT 0x1 +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, + 0x410000AC, 0x410000B8, 0x410000BC }; #endif diff --git 
a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ee47820c30a6..cd15678f9ada 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth, " Maximum queue depth to report for target devices.\n" "\t\t Default: 32."); +static int ql4xqfulltracking = 1; +module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xqfulltracking, + " Enable or disable dynamic tracking and adjustment of\n" + "\t\t scsi device queue depth.\n" + "\t\t 0 - Disable.\n" + "\t\t 1 - Enable. (Default)"); + static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; module_param(ql4xsess_recovery_tmo, int, S_IRUGO); MODULE_PARM_DESC(ql4xsess_recovery_tmo, " Target Session Recovery Timeout.\n" "\t\t Default: 120 sec."); +int ql4xmdcapmask = 0x1F; +module_param(ql4xmdcapmask, int, S_IRUGO); +MODULE_PARM_DESC(ql4xmdcapmask, + " Set the Minidump driver capture mask level.\n" + "\t\t Default is 0x1F.\n" + "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); + +int ql4xenablemd = 1; +module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xenablemd, + " Set to enable minidump.\n" + "\t\t 0 - disable minidump\n" + "\t\t 1 - enable minidump (Default)"); + static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); /* * SCSI host template entry points @@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device); static void qla4xxx_slave_destroy(struct scsi_device *sdev); static umode_t ql4_attr_is_visible(int param_type, int param); static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); +static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason); static struct qla4_8xxx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; @@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = { .slave_configure = qla4xxx_slave_configure, .slave_alloc = qla4xxx_slave_alloc, .slave_destroy = qla4xxx_slave_destroy, + .change_queue_depth = qla4xxx_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, @@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; - unsigned long flags; + unsigned long flags, wtime; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint32_t ddb_state; + int ret; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto destroy_session; + } + + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto destroy_session; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto destroy_session; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +destroy_session: qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); spin_lock_irqsave(&ha->hardware_lock, flags); qla4xxx_free_ddb(ha, ddb_entry); spin_unlock_irqrestore(&ha->hardware_lock, flags); + iscsi_session_teardown(cls_sess); + + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, 
sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); } static struct iscsi_cls_conn * @@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, ha->queues_dma); + if (ha->fw_dump) + vfree(ha->fw_dump); + ha->queues_len = 0; ha->queues = NULL; ha->queues_dma = 0; @@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) ha->response_dma = 0; ha->shadow_regs = NULL; ha->shadow_regs_dma = 0; + ha->fw_dump = NULL; + ha->fw_dump_size = 0; /* Free srb pool. */ if (ha->srb_mempool) @@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, set_bit(AF_INIT_DONE, &ha->flags); + qla4_8xxx_alloc_sysfs_attr(ha); + printk(KERN_INFO " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", @@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) iscsi_boot_destroy_kset(ha->boot_kset); qla4xxx_destroy_fw_ddb_session(ha); + qla4_8xxx_free_sysfs_attr(ha); scsi_remove_host(ha->host); @@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev) scsi_deactivate_tcq(sdev, 1); } +static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) +{ + if (!ql4xqfulltracking) + return -EOPNOTSUPP; + + return iscsi_change_queue_depth(sdev, qdepth, reason); +} + /** * qla4xxx_del_from_active_array - returns an active srb * @ha: Pointer to host adapter structure. diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 97b30c108e36..cc1cc3518b87 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k16" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k17" diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 62ddfd31d4ce..6dfb9785d345 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; - struct scsi_target *starget; if (!sdev) return 0; shost = sdev->host; - starget = scsi_target(sdev); - if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || - scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) + /* + * Ignore host/starget busy state. + * Since block layer does not have a concept of fairness across + * multiple queues, congestion of host/starget needs to be handled + * in SCSI layer. 
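+ * The queue still gets throttled correctly: scsi_request_fn re-checks host and target readiness at dispatch time and requeues busy commands, so only the back-pressure point moves into the SCSI layer.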
+ */ + if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) return 1; return 0; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index f661a41fa4c6..d4201ded3b22 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg) err = scsi_device_quiesce(to_scsi_device(dev)); if (err == 0) { drv = dev->driver; - if (drv && drv->suspend) + if (drv && drv->suspend) { err = drv->suspend(dev, msg); + if (err) + scsi_device_resume(to_scsi_device(dev)); + } } dev_dbg(dev, "scsi suspend: %d\n", err); return err; } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 01b03744f1f9..2e5fe584aad3 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -147,7 +147,7 @@ int scsi_complete_async_scans(void) do { if (list_empty(&scanning_hosts)) - return 0; + goto out; /* If we can't get memory immediately, that's OK. Just * sleep a little. Even if we never get memory, the async * scans will finish eventually. @@ -179,8 +179,11 @@ int scsi_complete_async_scans(void) } done: spin_unlock(&async_scan_lock); kfree(data); + + out: + async_synchronize_full_domain(&scsi_sd_probe_domain); return 0; } diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 74708fcaf82f..ae7814874618 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c @@ -12,7 +12,7 @@ #include <linux/module.h> #include <linux/device.h> -#include <scsi/scsi_scan.h> +#include "scsi_priv.h" static int __init wait_scan_init(void) { diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 4e010b727818..6a4fd00117ca 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = pci_request_regions(pdev, UFSHCD); if (err < 0) { dev_err(&pdev->dev, "request regions failed\n"); - goto out_disable; + goto out_host_put; } hba->mmio_base = pci_ioremap_bar(pdev, 0); @@ -1925,8 +1925,9 @@ out_iounmap: iounmap(hba->mmio_base); out_release_regions: pci_release_regions(pdev); -out_disable: +out_host_put: scsi_host_put(host); +out_disable: pci_clear_master(pdev); pci_disable_device(pdev); out_error:
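
For readers tracing the new qla4xxx minidump path, the template walk inside qla4_8xxx_collect_md_data() is the core of the feature. The sketch below isolates that walk as a self-contained user-space program; struct entry_hdr and walk_template() are simplified, hypothetical stand-ins for struct qla82xx_minidump_entry_hdr and the driver loop (not driver API), but the control flow is the same: filter each entry header against the requested capture mask, dispatch on entry_type, and advance by entry_size.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct qla82xx_minidump_entry_hdr. */
struct entry_hdr {
	uint32_t entry_type;		/* QLA82XX_RDCRB, QLA82XX_RDMEM, ... */
	uint32_t entry_size;		/* offset to the next header, in bytes */
	uint32_t entry_capture_size;
	uint8_t entry_capture_mask;	/* compared against the requested mask */
	uint8_t entry_code;
	uint8_t driver_code;
	uint8_t driver_flags;
};

#define DBG_SKIPPED_FLAG 0x80		/* stands in for QLA82XX_DBG_SKIPPED_FLAG */

/* Hypothetical helper: walk a template blob the way the driver loop does. */
static void walk_template(uint8_t *tmplt, uint32_t first_entry_offset,
			  uint32_t num_entries, uint8_t capture_mask)
{
	struct entry_hdr *hdr =
		(struct entry_hdr *)(tmplt + first_entry_offset);
	uint32_t i;

	for (i = 0; i < num_entries; i++) {
		if (!(hdr->entry_capture_mask & capture_mask)) {
			/* same bookkeeping as ql4_8xxx_mark_entry_skipped() */
			hdr->driver_flags |= DBG_SKIPPED_FLAG;
			printf("entry %u: type %u skipped\n", i, hdr->entry_type);
		} else {
			/* the driver dispatches here: switch (entry_type) ... */
			printf("entry %u: type %u, capture size %u\n",
			       i, hdr->entry_type, hdr->entry_capture_size);
		}
		/* headers are variable length; entry_size chains them */
		hdr = (struct entry_hdr *)((uint8_t *)hdr + hdr->entry_size);
	}
}

int main(void)
{
	uint32_t storage[8] = { 0 };	/* 32 aligned bytes: two 16-byte headers */
	uint8_t *blob = (uint8_t *)storage;
	struct entry_hdr *e = (struct entry_hdr *)blob;

	e->entry_type = 1;		/* QLA82XX_RDCRB */
	e->entry_size = 16;
	e->entry_capture_mask = 0x1F;

	e = (struct entry_hdr *)(blob + 16);
	e->entry_type = 255;		/* QLA82XX_RDEND */
	e->entry_size = 16;
	e->entry_capture_mask = 0x20;	/* outside the requested mask */

	walk_template(blob, 0, 2, 0x1F);	/* ql4xmdcapmask-style mask */
	return 0;
}

The mask argument plays the role of the new ql4xmdcapmask module parameter; entries outside the mask are only flagged as skipped, never captured.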