Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |  22
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       | 628
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c        |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h        |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       |  24
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c    | 108
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h       |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        | 200
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |  39
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       | 277
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c        |  47
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  |  56
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       | 552
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h       |  18
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        | 267
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h        |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       |  21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    |   2
20 files changed, 1951 insertions, 326 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75ca7451..94a3cafe7197 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@ struct lpfc_sli2_slim;
*/
/* 1 Second */
#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
-/* 5 minutes */
-#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@ struct lpfc_hba {
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_fof;
+ uint32_t cfg_EnableXLane;
+ uint8_t cfg_oas_tgt_wwpn[8];
+ uint8_t cfg_oas_vpt_wwpn[8];
+ uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE 1
+#define OAS_LUN_DISABLE 0
+ uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS 0x01
+ uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT 0x01
+#define OAS_FIND_ANY_TARGET 0x02
+#define OAS_LUN_VALID 0x04
+ uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@ struct lpfc_hba {
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
+ uint32_t cfg_rrq_xri_bitmap_sz;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
@@ -835,6 +848,7 @@ struct lpfc_hba {
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
mempool_t *rrq_pool;
+ mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
@@ -869,7 +883,6 @@ struct lpfc_hba {
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
- unsigned long last_ramp_up_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@ struct lpfc_hba {
atomic_t sdev_cnt;
uint8_t fips_spec_rev;
uint8_t fips_level;
+ spinlock_t devicelock; /* lock for luns list */
+ mempool_t *device_data_mem_pool;
+ struct list_head luns;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc92b93..8d5b6ceec9c9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ * (OAS) is supported.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the OAS support state (1 or 0).
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+ NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+ unsigned int i, j;
+
+ /* Count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+ if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+ ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ return -EINVAL;
+
+ memset(wwn, 0, WWN_SZ);
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ if ((*buf >= 'a') && (*buf <= 'f'))
+ j = ((j << 4) | ((*buf++ - 'a') + 10));
+ else if ((*buf >= 'A') && (*buf <= 'F'))
+ j = ((j << 4) | ((*buf++ - 'A') + 10));
+ else if ((*buf >= '0') && (*buf <= '9'))
+ j = ((j << 4) | (*buf++ - '0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+ return 0;
+}
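
The new lpfc_wwn_set() helper centralizes the hex parsing that lpfc_soft_wwpn_store() and lpfc_soft_wwnn_store() previously duplicated: it accepts 16 hex digits, optionally prefixed with "x" or "0x", with a trailing newline tolerated. A minimal user-space sketch of the same accepted formats (parse_wwn is a hypothetical test harness, not driver code):

#include <ctype.h>
#include <stdint.h>
#include <string.h>

/* Mirrors lpfc_wwn_set()'s accepted formats outside the kernel. */
static int parse_wwn(const char *buf, size_t cnt, uint8_t wwn[8])
{
	unsigned int i, j;

	if (cnt && buf[cnt - 1] == '\n')	/* tolerate trailing LF */
		cnt--;
	if (cnt < 16 || cnt > 18 ||
	    (cnt == 17 && *buf++ != 'x') ||
	    (cnt == 18 && (*buf++ != '0' || *buf++ != 'x')))
		return -1;

	memset(wwn, 0, 8);
	for (i = 0, j = 0; i < 16; i++, buf++) {
		if (!isxdigit((unsigned char)*buf))
			return -1;
		j = (j << 4) | (isdigit((unsigned char)*buf) ?
		     (unsigned int)(*buf - '0') :
		     (unsigned int)(tolower((unsigned char)*buf) - 'a' + 10));
		if (i % 2) {		/* two hex digits per output byte */
			wwn[i / 2] = j & 0xff;
			j = 0;
		}
	}
	return 0;
}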
/**
* lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
* @dev: class device that is converted into a Scsi_host.
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int stat1=0, stat2=0;
- unsigned int i, j, cnt=count;
- u8 wwpn[8];
+ int stat1 = 0, stat2 = 0;
+ unsigned int cnt = count;
+ u8 wwpn[WWN_SZ];
int rc;
if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
+ /* lock setting wwpn, wwnn down */
phba->soft_wwn_enable = 0;
- memset(wwpn, 0, sizeof(wwpn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwpn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc) {
+ /* not able to set wwpn, unlock it */
+ phba->soft_wwn_enable = 1;
+ return rc;
}
+
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
/**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int i, j, cnt=count;
- u8 wwnn[8];
+ unsigned int cnt = count;
+ u8 wwnn[WWN_SZ];
+ int rc;
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
- /*
- * Allow wwnn to be set many times, as long as the enable is set.
- * However, once the wwpn is set, everything locks.
- */
-
- memset(wwnn, 0, sizeof(wwnn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwnn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwnn);
+ if (rc) {
+ /* Allow wwnn to be set many times, as long as the enable
+ * is set. However, once the wwpn is set, everything locks.
+ */
+ return rc;
}
+
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn contains an invalid byte
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+ lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn contains an invalid byte
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+ lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the state (enabled or disabled) that will
+ * be applied to luns for Optimized Access
+ * Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled) to be
+ * applied to luns for Optimized Access
+ * Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL the state value is not 0 or 1
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int val = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ phba->cfg_oas_lun_state = val;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ * Storage (OAS) lun returned by the
+ * lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+ return -EFAULT;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+ lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ * (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport whose lun is being set.
+ * @tgt_wwpn: wwpn of the fcp target whose lun is being set.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * 0 on success
+ * -ENOMEM failed to enable the lun for OAS operations
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static size_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+ int rc = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (oas_state) {
+ if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun))
+ rc = -ENOMEM;
+ } else {
+ lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun);
+ }
+ return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ * Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified. If a lun is found, its vport wwpn, target wwpn and status are
+ * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+ uint64_t found_lun;
+
+ if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+ return NOT_OAS_ENABLED_LUN;
+ if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+ phba->sli4_hba.oas_next_vpt_wwpn,
+ (struct lpfc_name *)
+ phba->sli4_hba.oas_next_tgt_wwpn,
+ &phba->sli4_hba.oas_next_lun,
+ (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn,
+ &found_lun, lun_status))
+ return found_lun;
+ else
+ return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun,
+ uint32_t oas_state)
+{
+
+ int rc;
+
+ rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+ oas_state);
+ return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ uint64_t oas_lun;
+ int len = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+ return -EFAULT;
+
+ oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn,
+ &phba->cfg_oas_lun_status);
+ if (oas_lun != NOT_OAS_ENABLED_LUN)
+ phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+ len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+ return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for the lun. Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: value of count.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint64_t scsi_lun;
+ ssize_t rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ return -EFAULT;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+ return -EINVAL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+ "with oas set to %d\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn, scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_show, lpfc_oas_lun_store);
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vector_map_info *cpup;
- int idx, len = 0;
+ int len = 0;
if ((phba->sli_rev != LPFC_SLI_REV4) ||
(phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
break;
}
- cpup = phba->sli4_hba.cpu_map;
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+ while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+ /* margin should fit in this and the truncated message */
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id, cpup->irq);
- cpup++;
+ phba->sli4_hba.curr_disp_cpu++;
+
+ /* display max number of CPUs keeping some margin */
+ if (phba->sli4_hba.curr_disp_cpu <
+ phba->sli4_hba.num_present_cpu &&
+ (len >= (PAGE_SIZE - 64))) {
+ len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+ break;
+ }
}
+
+ if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+ phba->sli4_hba.curr_disp_cpu = 0;
+
return len;
}
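
Because lpfc_fcp_cpu_map_show() now resumes from curr_disp_cpu and appends "more..." when it nears PAGE_SIZE, a consumer must re-read the attribute until that marker disappears. A hedged user-space sketch (the sysfs path is assumed):

#include <stdio.h>
#include <string.h>

static void dump_cpu_map(const char *path)
{
	char buf[4096];
	size_t n;
	FILE *f;
	int more = 1;

	while (more) {
		f = fopen(path, "r");	/* each open/read yields one page */
		if (!f)
			return;
		n = fread(buf, 1, sizeof(buf) - 1, f);
		fclose(f);
		buf[n] = '\0';
		fputs(buf, stdout);
		more = strstr(buf, "more...") != NULL;
	}
}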
@@ -4157,6 +4643,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
/*
+# lpfc_EnableXLane: Enable Express Lane Feature
+# 0x0 Express Lane Feature disabled
+# 0x1 Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
+# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
# 0 = BlockGuard disabled (default)
# 1 = BlockGuard enabled
@@ -4317,6 +4818,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_soft_wwn_enable,
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
+ &dev_attr_lpfc_EnableXLane,
+ &dev_attr_lpfc_XLanePriority,
+ &dev_attr_lpfc_xlane_lun,
+ &dev_attr_lpfc_xlane_tgt,
+ &dev_attr_lpfc_xlane_vpt,
+ &dev_attr_lpfc_xlane_lun_state,
+ &dev_attr_lpfc_xlane_lun_status,
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
+ &dev_attr_lpfc_xlane_supported,
NULL,
};
@@ -5296,11 +5805,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+ lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ phba->cfg_EnableXLane = 0;
+ lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+ memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+ memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+ phba->cfg_oas_lun_state = 0;
+ phba->cfg_oas_lun_status = 0;
+ phba->cfg_oas_flags = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
- phba->cfg_poll = lpfc_poll;
+ phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20e2d8..ca2f4ea7cdef 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
switch (opcode) {
case FCOE_OPCODE_READ_FCF:
+ case FCOE_OPCODE_GET_DPORT_RESULTS:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2957 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
nemb_mse, dmabuf);
break;
case FCOE_OPCODE_ADD_FCF:
+ case FCOE_OPCODE_SET_DPORT_MODE:
+ case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2958 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a160d1..a94d4c9dfaa5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys {
#define SLI_CONFIG_SUBSYS_FCOE 0x0C
#define FCOE_OPCODE_READ_FCF 0x08
#define FCOE_OPCODE_ADD_FCF 0x09
+#define FCOE_OPCODE_SET_DPORT_MODE 0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a84239..adda0bf7a244 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
@@ -242,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@ void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
-void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
void lpfc_scsi_dev_block(struct lpfc_hba *);
void
@@ -471,3 +476,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+ struct lpfc_name *,
+ struct lpfc_name *,
+ uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+ struct list_head *list,
+ struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, uint32_t *);
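
These prototypes sketch the new OAS device-data API: records keyed by (vport wwpn, target wwpn, lun) live on phba->luns under phba->devicelock, with the double-underscore lookup requiring the caller to hold that lock. A hedged sketch of the intended composition (oas_toggle_lun is a hypothetical wrapper, and the lock/check/act split below is illustrative rather than race-free driver code):

static bool
oas_toggle_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
	       struct lpfc_name *target_wwpn, uint64_t lun, bool enable)
{
	struct lpfc_device_data *device_data;
	unsigned long flags;

	/* __lpfc_get_device_data() expects devicelock to be held. */
	spin_lock_irqsave(&phba->devicelock, flags);
	device_data = __lpfc_get_device_data(phba, &phba->luns,
					     vport_wwpn, target_wwpn, lun);
	spin_unlock_irqrestore(&phba->devicelock, flags);

	if (enable && device_data)
		return true;	/* lun is already tracked as OAS enabled */

	return enable ?
		lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun) :
		lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun);
}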
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc952ca6..828c08e9389e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
}
}
+ if (phba->cfg_fof) {
+ /* FOF EQ */
+ qp = phba->sli4_hba.fof_eq;
+ if (!qp)
+ goto out;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nFOF EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ if (phba->cfg_EnableXLane) {
+
+ /* OAS CQ */
+ qp = phba->sli4_hba.oas_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tOAS CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* OAS WQ */
+ qp = phba->sli4_hba.oas_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tOAS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+ }
+out:
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -3927,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
char name[64];
uint32_t num, i;
+ bool pport_setup = false;
if (!lpfc_debugfs_enable)
return;
@@ -3947,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup funcX directory for specific HBA PCI function */
snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
+ pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/*
+ * The following section is for additional directories/files for the
+ * physical port.
+ */
+
+ if (!pport_setup)
+ goto debug_failed;
+
+ /*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5f728c..1a6fe524940d 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@ struct lpfc_nodelist {
atomic_t cmd_pending;
uint32_t cmd_qdepth;
unsigned long last_change_time;
- struct lpfc_node_rrqs active_rrqs;
+ unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f0c58d..624fe0b3cc0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint32_t rc, keepDID = 0;
int put_node;
int put_rport;
- struct lpfc_node_rrqs rrq;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
return ndlp;
- memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (active_rrqs_xri_bitmap)
+ memset(active_rrqs_xri_bitmap, 0,
+ phba->cfg_rrq_xri_bitmap_sz);
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = lpfc_enable_node(vport, new_ndlp,
NLP_STE_UNUSED_NODE);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
} else {
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
}
lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(new_ndlp->active_rrqs.xri_bitmap,
- &ndlp->active_rrqs.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ memcpy(new_ndlp->active_rrqs_xri_bitmap,
+ ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
lpfc_drop_node(vport, ndlp);
}
else {
@@ -1634,10 +1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
put_device(&rport->dev);
}
}
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return new_ndlp;
}
@@ -2772,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
+
lpfc_nlp_put(ndlp);
return 0;
}
@@ -6193,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr)
spin_lock_irqsave(&vport->work_port_lock, iflag);
tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
lpfc_worker_wake_up(phba);
return;
}
@@ -6223,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t els_command = 0;
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
-
+ if ((phba->pport->load_flag & FC_UNLOADING))
+ return;
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ if ((phba->pport->load_flag & FC_UNLOADING)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
- list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
list_add_tail(&piocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
@@ -6290,8 +6328,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
- mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * timeout));
}
/**
@@ -6317,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
- LIST_HEAD(completions);
+ LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
lpfc_fabric_abort_vport(vport);
+ /*
+ * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
+ * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
+ * ultimately grabs the ring_lock, the driver must splice the list into
+ * a working list and release the locks before calling the abort.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+ list_add_tail(&piocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ /* Abort each iocb on the aborted list and remove the dlist links. */
+ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&piocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+ if (!list_empty(&abort_list))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "3387 abort list for txq not empty\n");
+ INIT_LIST_HEAD(&abort_list);
spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -6343,24 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
- list_move_tail(&piocb->list, &completions);
- }
-
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
- continue;
- }
-
- if (piocb->vport != vport)
- continue;
-
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
}
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
return;
}
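
As the lock-ordering comment in the hunk above explains, the abort path reduces to a two-phase pattern: harvest matching iocbs onto a side list via their dlist link while holding hbalock (plus ring_lock on SLI4), then drop the locks and abort each entry, re-taking hbalock per iocb because lpfc_sli_issue_abort_iotag() may itself acquire ring_lock. A condensed, hedged restatement (els_abort_vport_iocbs is a hypothetical name):

static void
els_abort_vport_iocbs(struct lpfc_hba *phba, struct lpfc_vport *vport,
		      struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *piocb, *tmp_iocb;
	LIST_HEAD(abort_list);

	/* Phase 1: collect candidates under the lock(s). */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list)
		if (piocb->vport == vport &&
		    !(piocb->iocb_flag & LPFC_IO_LIBDFC))
			/* dlist is a second link; iocb stays on txcmplq */
			list_add_tail(&piocb->dlist, &abort_list);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Phase 2: abort outside ring_lock, one iocb at a time. */
	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&piocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
		spin_unlock_irq(&phba->hbalock);
	}
}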
@@ -6385,35 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
- LIST_HEAD(completions);
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
-
- lpfc_fabric_abort_hba(phba);
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
- continue;
- list_move_tail(&piocb->list, &completions);
- }
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- }
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ struct lpfc_vport *vport;
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(vport);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d9f237..59b51c529ba0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
- if (work_port_events & WORKER_RAMP_UP_QUEUE)
- lpfc_ramp_up_queue_handler(phba);
if (work_port_events & WORKER_DELAYED_DISC_TMO)
lpfc_delayed_disc_timeout_handler(vport);
}
@@ -2545,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (!new_fcf_record) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
- "failed to retrieve a FCF record.\n");
- goto error_out;
+ "failed to retrieve a FCF record. "
+ "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+ phba->fcf.fcf_flag);
+ lpfc_unregister_fcf_rescan(phba);
+ goto out;
}
/* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_map_cnt += count;
break;
case NLP_STE_NPR_NODE:
- vport->fc_npr_cnt += count;
+ if (vport->fc_npr_cnt == 0 && count == -1)
+ vport->fc_npr_cnt = 0;
+ else
+ vport->fc_npr_cnt += count;
break;
}
spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
uint32_t did;
unsigned long flags;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
if (!ndlp)
return NULL;
@@ -4208,12 +4213,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Keep the original DID */
did = ndlp->nlp_DID;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
/* re-initialize ndlp except of ndlp linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
lpfc_initialize_node(vport, ndlp, did);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
@@ -4799,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x\n",
+ "Data: x%p x%x x%x x%x %p\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1);
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
}
@@ -5618,8 +5629,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ ndlp->active_rrqs_xri_bitmap =
+ mempool_alloc(vport->phba->active_rrq_pool,
+ GFP_KERNEL);
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@ lpfc_nlp_release(struct kref *kref)
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
}
@@ -6170,10 +6189,6 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
memcpy(&conn_entry->conn_rec, &conn_rec[i],
sizeof(struct lpfc_fcf_conn_rec));
- conn_entry->conn_rec.vlan_tag =
- conn_entry->conn_rec.vlan_tag;
- conn_entry->conn_rec.flags =
- conn_entry->conn_rec.flags;
list_add_tail(&conn_entry->list,
&phba->fcf_conn_rec_list);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d30ca69..3d9438ce59ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
+#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b116d328..fd79f7de7666 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
+#define cfg_oas_SHIFT 25
+#define cfg_oas_MASK 0x00000001
+#define cfg_oas_WORD word12
#define cfg_loopbk_scope_SHIFT 28
#define cfg_loopbk_scope_MASK 0x0000000f
#define cfg_loopbk_scope_WORD word12
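
The new cfg_oas triplet publishes OAS capability in bit 25 of word12 of the SLI4 parameters; the driver reads it via bf_get(cfg_oas, mbx_sli4_parameters) in lpfc_get_sli4_parameters(). Conceptually the generated accessor reduces to a shift-and-mask (a sketch, not the literal macro expansion):

static inline uint32_t sli4_params_oas_bit(uint32_t word12)
{
	return (word12 >> cfg_oas_SHIFT) & cfg_oas_MASK;	/* bit 25 */
}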
@@ -3322,6 +3325,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
+#define wqe_oas_SHIFT 6
+#define wqe_oas_MASK 0x00000001
+#define wqe_oas_WORD word10
#define wqe_lenloc_SHIFT 7
#define wqe_lenloc_MASK 0x00000003
#define wqe_lenloc_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc85c35..635eeb3d6987 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@ lpfc_rrq_timeout(unsigned long ptr)
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ else
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- lpfc_worker_wake_up(phba);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_worker_wake_up(phba);
}
/**
@@ -1468,7 +1474,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* for handling possible port resource change.
**/
static int
-lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+ bool en_rn_msg)
{
int rc;
uint32_t intr_mode;
@@ -1480,9 +1487,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (!rc) {
/* need reset: attempt for port recovery */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Reset Needed: Attempting Port "
- "Recovery...\n");
+ if (en_rn_msg)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
lpfc_offline(phba);
/* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t reg_err1, reg_err2;
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
+ bool en_rn_msg = true;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
break;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
- reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3143 Port Down: Firmware Restarted\n");
- else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ "3143 Port Down: Firmware Update "
+ "Detected\n");
+ en_rn_msg = false;
+ } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
"3145 Port Down: Provisioning\n");
/* Check port status register for function reset */
- rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+ en_rn_msg);
if (rc == 0) {
/* don't report event on forced debug dump */
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
+ int fof_vectors = 0;
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+ rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
/* IF Type 0 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Verify OAS is supported */
+ lpfc_sli4_oas_verify(phba);
+ if (phba->cfg_fof)
+ fof_vectors = 1;
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -5159,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl =
kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors + phba->cfg_fcp_io_channel)),
+ GFP_KERNEL);
if (!phba->sli4_hba.fcp_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors +
+ phba->cfg_fcp_io_channel)), GFP_KERNEL);
if (!phba->sli4_hba.msix_entries) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.num_online_cpu = 0;
+ phba->sli4_hba.curr_disp_cpu = 0;
/* Free memory allocated for msi-x interrupt vector entries */
kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize FCF connection rec list */
INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
+
return 0;
}
@@ -6816,6 +6845,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
int cfg_fcp_io_channel;
uint32_t cpu;
uint32_t i = 0;
+ int fof_vectors = phba->cfg_fof ? 1 : 0;
/*
* Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
phba->sli4_hba.num_online_cpu = i;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;
if (i < cfg_fcp_io_channel) {
lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
cfg_fcp_io_channel = i;
}
- if (cfg_fcp_io_channel >
+ if (cfg_fcp_io_channel + fof_vectors >
phba->sli4_hba.max_cfg_param.max_eq) {
if (phba->sli4_hba.max_cfg_param.max_eq <
LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
"available EQs: from %d to %d\n",
cfg_fcp_io_channel,
phba->sli4_hba.max_cfg_param.max_eq);
- cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+ fof_vectors;
}
/* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
+ /* Create the Queues needed for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_create(phba);
return 0;
out_error:
@@ -7094,6 +7129,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
int idx;
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
+
if (phba->sli4_hba.hba_eq != NULL) {
/* Release HBA event queue */
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.els_cq->queue_id);
+
+ if (phba->cfg_fof) {
+ rc = lpfc_fof_queue_setup(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0549 Failed setup of FOF Queues: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_rq;
+ }
+ }
return 0;
+out_destroy_els_rq:
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
int fcp_qidx;
+ /* Unset the queues created for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
/* Unset mailbox command work queue */
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
/* Unset ELS work queue */
@@ -8635,6 +8688,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Configure MSI-X capability structure */
vectors = phba->cfg_fcp_io_channel;
+ if (phba->cfg_fof) {
+ phba->sli4_hba.msix_entries[index].entry = index;
+ vectors++;
+ }
enable_msix_vectors:
rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
vectors);
@@ -8664,7 +8721,15 @@ enable_msix_vectors:
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
- rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+ if (phba->cfg_fof && (index == (vectors - 1)))
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ else
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
&lpfc_sli4_hba_intr_handler, IRQF_SHARED,
(char *)&phba->sli4_hba.handler_name[index],
&phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@ enable_msix_vectors:
}
}
+ if (phba->cfg_fof)
+ vectors--;
+
if (vectors != phba->cfg_fcp_io_channel) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3238 Reducing IO channels to match number of "
@@ -8721,7 +8789,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
}
-
+ if (phba->cfg_fof) {
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -8771,6 +8842,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
return 0;
}
@@ -8853,6 +8928,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
fcp_eq_in_use, 1);
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
+ }
}
}
return intr_mode;
@@ -9163,6 +9244,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported by this adapter. If it is
+ * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
+ * Otherwise, the XLane enable flag is cleared and the memory pool created
+ * for OAS device data is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+ if (!phba->cfg_EnableXLane)
+ return;
+
+ if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+ phba->cfg_fof = 1;
+ } else {
+ phba->cfg_EnableXLane = 0;
+ if (phba->device_data_mem_pool)
+ mempool_destroy(phba->device_data_mem_pool);
+ phba->device_data_mem_pool = NULL;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int rc;
+
+ rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+ if (rc)
+ return -ENOMEM;
+
+ if (phba->cfg_EnableXLane) {
+
+ rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+ phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+ if (rc)
+ goto out_oas_cq;
+
+ rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+ phba->sli4_hba.oas_cq, LPFC_FCP);
+ if (rc)
+ goto out_oas_wq;
+
+ phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+ phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+ }
+
+ return 0;
+
+out_oas_wq:
+ if (phba->cfg_EnableXLane)
+ lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+ lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+ return rc;
+
+}
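The error unwind above uses the kernel's reverse-order goto idiom: each label is named for the step that failed, and its body tears down the step that succeeded before it (a failed CQ create jumps to out_oas_cq, which destroys the EQ). A minimal, self-contained sketch of the same pattern, with hypothetical create_eq/create_cq/create_wq helpers standing in for the lpfc calls:

    #include <stdio.h>

    /* Hypothetical stand-ins for the lpfc_*_create/destroy calls. */
    static int create_eq(void) { return 0; }
    static int create_cq(void) { return 0; }
    static int create_wq(void) { return -1; }  /* force the error path */
    static void destroy_cq(void) { puts("undo: cq"); }
    static void destroy_eq(void) { puts("undo: eq"); }

    static int setup(void)
    {
            int rc;

            rc = create_eq();
            if (rc)
                    return rc;              /* nothing to undo yet */
            rc = create_cq();
            if (rc)
                    goto out_cq;            /* label names the failed step */
            rc = create_wq();
            if (rc)
                    goto out_wq;            /* ...body undoes the prior one */
            return 0;
    out_wq:
            destroy_cq();
    out_cq:
            destroy_eq();
            return rc;
    }

    int main(void)
    {
            return setup() ? 1 : 0;         /* prints "undo: cq", "undo: eq" */
    }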
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, parameters such as the queue entry
+ * count (queue depth) would ideally be taken from module parameters; for
+ * now, constant defaults are used as placeholders.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+
+ /* Create FOF EQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.fof_eq = qdesc;
+
+ if (phba->cfg_EnableXLane) {
+
+ /* Create OAS CQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_cq = qdesc;
+
+ /* Create OAS WQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_wq = qdesc;
+
+ }
+ return 0;
+
+out_error:
+ lpfc_fof_queue_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the fof SLI4 queues associated with
+ * the FC HBA operation.
+ *
+ * Return codes
+ * 0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+ /* Release FOF Event queue */
+ if (phba->sli4_hba.fof_eq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+ phba->sli4_hba.fof_eq = NULL;
+ }
+
+ /* Release OAS Completion queue */
+ if (phba->sli4_hba.oas_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+ phba->sli4_hba.oas_cq = NULL;
+ }
+
+ /* Release OAS Work queue */
+ if (phba->sli4_hba.oas_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+ phba->sli4_hba.oas_wq = NULL;
+ }
+ return 0;
+}
+
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd7c86d..ed419aad2b1f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
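+
+/**
+ * lpfc_mem_alloc_active_rrq_pool_s4 - Create the SLI4 active RRQ mempool
+ * @phba: HBA to associate the pool with.
+ *
+ * Sizes one XRI bitmap (one bit per XRI, rounded up to a whole number of
+ * unsigned longs) and creates a kmalloc-backed mempool of such bitmaps for
+ * tracking active RRQs.
+ *
+ * Return codes
+ *   0 - successful
+ *   -ENOMEM - no available memory
+ **/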
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
+{
+ size_t bytes;
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (max_xri <= 0)
+ return -ENOMEM;
+ bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+ sizeof(unsigned long);
+ phba->cfg_rrq_xri_bitmap_sz = bytes;
+ phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ bytes);
+ if (!phba->active_rrq_pool)
+ return -ENOMEM;
+ return 0;
+}
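The bitmap sizing rounds max_xri up to a whole number of unsigned longs, one bit per XRI. As a concrete check (a standalone sketch; max_xri = 1000 is chosen purely for illustration): on a 64-bit build, (63 + 1000) / 64 = 16 longs, i.e. 128 bytes per bitmap.

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            int max_xri = 1000;   /* illustrative value, not a driver default */
            unsigned long bpl = sizeof(unsigned long) * CHAR_BIT; /* BITS_PER_LONG */
            size_t bytes = ((bpl - 1 + max_xri) / bpl) * sizeof(unsigned long);

            /* 64-bit: (63 + 1000) / 64 = 16 longs -> 128 bytes */
            printf("%zu bytes per XRI bitmap\n", bytes);
            return 0;
    }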
/**
* lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_drb_pool = NULL;
}
+ if (phba->cfg_EnableXLane) {
+ phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+ LPFC_DEVICE_DATA_POOL_SIZE,
+ sizeof(struct lpfc_device_data));
+ if (!phba->device_data_mem_pool)
+ goto fail_free_hrb_pool;
+ } else {
+ phba->device_data_mem_pool = NULL;
+ }
+
return 0;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
{
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ struct lpfc_device_data *device_data;
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+ mempool_destroy(phba->active_rrq_pool);
+ phba->active_rrq_pool = NULL;
+ }
/* Free mbox memory pool */
mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
+ /* Free Device Data memory pool */
+ if (phba->device_data_mem_pool) {
+ /* Ensure all objects have been returned to the pool */
+ while (!list_empty(&phba->luns)) {
+ device_data = list_first_entry(&phba->luns,
+ struct lpfc_device_data,
+ listentry);
+ list_del(&device_data->listentry);
+ mempool_free(device_data, phba->device_data_mem_pool);
+ }
+ mempool_destroy(phba->device_data_mem_pool);
+ }
+ phba->device_data_mem_pool = NULL;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc361259d6d..c342f6afd747 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- LIST_HEAD(completions);
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
-
+ /* Clean up all fabric IOs first. */
lpfc_fabric_abort_nport(ndlp);
- /* First check the txq */
+ /*
+ * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+ * of all ELS IOs that need an ABTS. The IOs need to stay on the
+ * txcmplq so that the abort operation completes them successfully.
+ */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- /* Check to see if iocb matches the nport we are looking for */
- if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
- /* It matches, so deque and call compl with anp error */
- list_move_tail(&iocb->list, &completions);
- }
- }
-
- /* Next check the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
-
- list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
- /* Check to see if iocb matches the nport we are looking for */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ /* Add to abort_list on NDLP match. */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
list_add_tail(&iocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
+ /* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&phba->hbalock);
}
+ INIT_LIST_HEAD(&abort_list);
+
+ /* Now process the txq */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+ list_del_init(&iocb->list);
+ list_add_tail(&iocb->list, &abort_list);
+ }
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05a5f0a..462453ee0bda 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+ if (vport->phba->cfg_EnableXLane)
+ return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+ else
+ return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
@@ -304,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
unsigned long new_queue_depth, old_queue_depth;
old_queue_depth = sdev->queue_depth;
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ /* change request from sysfs, fall through */
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ if (scsi_track_queue_full(sdev, qdepth) == 0)
+ return sdev->queue_depth;
+
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0711 detected queue full - lun queue "
+ "depth adjusted to %d.\n", sdev->queue_depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
new_queue_depth = sdev->queue_depth;
- rdata = sdev->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sdev);
if (rdata)
lpfc_send_sdev_queuedepth_change_event(phba, vport,
rdata->pnode, sdev->lun,
@@ -377,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
}
/**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time. This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport *vport,
- uint32_t queue_depth)
-{
- unsigned long flags;
- struct lpfc_hba *phba = vport->phba;
- uint32_t evt_posted;
- atomic_inc(&phba->num_cmd_success);
-
- if (vport->cfg_lun_queue_depth <= queue_depth)
- return;
- spin_lock_irqsave(&phba->hbalock, flags);
- if (time_before(jiffies,
- phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
- time_before(jiffies,
- phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return;
- }
- phba->last_ramp_up_time = jiffies;
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- spin_lock_irqsave(&phba->pport->work_port_lock, flags);
- evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
- if (!evt_posted)
- phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
- spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
- if (!evt_posted)
- lpfc_worker_wake_up(phba);
- return;
-}
-
-/**
* lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
* @phba: The Hba for which this call is being executed.
*
@@ -472,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
}
/**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
- struct lpfc_vport **vports;
- struct Scsi_Host *shost;
- struct scsi_device *sdev;
- int i;
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- shost_for_each_device(sdev, shost) {
- if (vports[i]->cfg_lun_queue_depth <=
- sdev->queue_depth)
- continue;
- lpfc_change_queue_depth(sdev,
- sdev->queue_depth+1,
- SCSI_QDEPTH_RAMP_UP);
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
- atomic_set(&phba->num_rsrc_err, 0);
- atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
* lpfc_scsi_dev_block - set all scsi hosts to block state
* @phba: Pointer to HBA context object.
*
@@ -1502,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
/* Next check if we need to match the remote NPortID or WWPN */
- rdata = sc->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sc->device);
if (rdata && rdata->pnode) {
ndlp = rdata->pnode;
@@ -3507,6 +3457,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
return 0;
}
@@ -4021,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd;
int result;
- struct scsi_device *tmp_sdev;
int depth;
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
- if (!result)
- lpfc_rampup_queue_depth(vport, queue_depth);
-
- /*
- * Check for queue full. If the lun is reporting queue full, then
- * back off the lun queue depth to prevent target overloads.
- */
- if (result == SAM_STAT_TASK_SET_FULL && pnode &&
- NLP_CHK_NODE_ACT(pnode)) {
- shost_for_each_device(tmp_sdev, shost) {
- if (tmp_sdev->id != scsi_id)
- continue;
- depth = scsi_track_queue_full(tmp_sdev,
- tmp_sdev->queue_depth-1);
- if (depth <= 0)
- continue;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0711 detected queue full - lun queue "
- "depth adjusted to %d.\n", depth);
- lpfc_send_sdev_queuedepth_change_event(phba, vport,
- pnode,
- tmp_sdev->lun,
- depth+1, depth);
- }
- }
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
}
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+ piocb->ulpPU = 0;
+ piocb->un.fcpi.fcpi_parm = 0;
/* ulpTimeout is only one byte */
if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_scsi_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4782,6 +4716,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
atomic_dec(&ndlp->cmd_pending);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "3376 FCP could not issue IOCB err %x"
+ "FCP cmd x%x <%d/%d> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : 0xffff,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
+
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned long later;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = cmnd->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata)
return FAILED;
pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
uint32_t num_to_alloc = 0;
int num_allocated = 0;
uint32_t sdev_cnt;
+ struct lpfc_device_data *device_data;
+ unsigned long flags;
+ struct lpfc_name target_wwpn;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- sdev->hostdata = rport->dd_data;
+ if (phba->cfg_EnableXLane) {
+
+ /*
+ * Check to see if the device data structure for the lun
+ * exists. If not, create one.
+ */
+
+ u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data = __lpfc_get_device_data(phba,
+ &phba->luns,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun);
+ if (!device_data) {
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ device_data = lpfc_create_device_data(phba,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun, true);
+ if (!device_data)
+ return -ENOMEM;
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_add_tail(&device_data->listentry, &phba->luns);
+ }
+ device_data->rport_data = rport->dd_data;
+ device_data->available = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ sdev->hostdata = device_data;
+ } else {
+ sdev->hostdata = rport->dd_data;
+ }
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
/*
@@ -5623,11 +5612,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_device_data *device_data = sdev->hostdata;
+
atomic_dec(&phba->sdev_cnt);
+ if ((phba->cfg_EnableXLane) && (device_data)) {
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data->available = false;
+ if (!device_data->oas_enabled)
+ lpfc_delete_device_data(phba, device_data);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ }
sdev->hostdata = NULL;
return;
}
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ * GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), the state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and a pointer to the rport data.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data *
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun,
+ bool atomic_create)
+{
+
+ struct lpfc_device_data *lun_info;
+ gfp_t memory_flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !(phba->cfg_EnableXLane))
+ return NULL;
+
+ /* Attempt to create the device data to contain lun info */
+
+ if (atomic_create)
+ memory_flags = GFP_ATOMIC;
+ else
+ memory_flags = GFP_KERNEL;
+ lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+ if (!lun_info)
+ return NULL;
+ INIT_LIST_HEAD(&lun_info->listentry);
+ lun_info->rport_data = NULL;
+ memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name));
+ lun_info->device_id.lun = lun;
+ lun_info->oas_enabled = false;
+ lun_info->available = false;
+ return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+ struct lpfc_device_data *lun_info)
+{
+
+ if (unlikely(!phba) || !lun_info ||
+ !(phba->cfg_EnableXLane))
+ return;
+
+ if (!list_empty(&lun_info->listentry))
+ list_del(&lun_info->listentry);
+ mempool_free(lun_info, phba->device_data_mem_pool);
+ return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Pointer to the list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not hold locks, it is the responsibility of the caller
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data *
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+ struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+
+ if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return NULL;
+
+ /* Search the list for an entry matching the specified lun. */
+
+ list_for_each_entry(lun_info, list, listentry) {
+ if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (lun_info->device_id.lun == lun))
+ return lun_info;
+ }
+
+ return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target. If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match. If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned. The function will also return the next lun if available.
+ * If the next lun is not found, the starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ *	true - A matching lun was found
+ *	false - No matching lun was found
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+ struct lpfc_name *found_vport_wwpn,
+ struct lpfc_name *found_target_wwpn,
+ uint64_t *found_lun,
+ uint32_t *found_lun_status)
+{
+
+ unsigned long flags;
+ struct lpfc_device_data *lun_info;
+ struct lpfc_device_id *device_id;
+ uint64_t lun;
+ bool found = false;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !starting_lun || !found_vport_wwpn ||
+ !found_target_wwpn || !found_lun || !found_lun_status ||
+ (*starting_lun == NO_MORE_OAS_LUN) ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ lun = *starting_lun;
+ *found_lun = NO_MORE_OAS_LUN;
+ *starting_lun = NO_MORE_OAS_LUN;
+
+ /* Search for the lun or the lun closest in value */
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_for_each_entry(lun_info, &phba->luns, listentry) {
+ if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ (lun_info->oas_enabled)) {
+ device_id = &lun_info->device_id;
+ if ((!found) &&
+ ((lun == FIND_FIRST_OAS_LUN) ||
+ (device_id->lun == lun))) {
+ *found_lun = device_id->lun;
+ memcpy(found_vport_wwpn,
+ &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(found_target_wwpn,
+ &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ if (lun_info->available)
+ *found_lun_status =
+ OAS_LUN_STATUS_EXISTS;
+ else
+ *found_lun_status = 0;
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+ memset(vport_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+ memset(target_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ found = true;
+ } else if (found) {
+ *starting_lun = device_id->lun;
+ memcpy(vport_wwpn, &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(target_wwpn, &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return found;
+}
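A hedged usage sketch of the iterator above (the wrapper name sketch_walk_oas_luns and the loop structure are illustrative, not part of the patch): zeroed wwpns act as wildcards, FIND_FIRST_OAS_LUN seeds the walk, and each successful call rewrites the in/out parameters so the next call resumes at the following OAS-enabled lun, until starting_lun comes back as NO_MORE_OAS_LUN.

    /* Illustrative only: walk every OAS-enabled lun known to the driver. */
    static void sketch_walk_oas_luns(struct lpfc_hba *phba)
    {
            struct lpfc_name vport_wwpn, target_wwpn;
            struct lpfc_name found_vport_wwpn, found_target_wwpn;
            uint64_t lun = FIND_FIRST_OAS_LUN;
            uint64_t found_lun;
            uint32_t found_lun_status;

            memset(&vport_wwpn, 0, sizeof(vport_wwpn));   /* any vport */
            memset(&target_wwpn, 0, sizeof(target_wwpn)); /* any target */

            while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
                                          &lun, &found_vport_wwpn,
                                          &found_target_wwpn, &found_lun,
                                          &found_lun_status)) {
                    /* found_lun is valid here; found_lun_status carries
                     * OAS_LUN_STATUS_EXISTS when the lun is available.
                     * On return, lun/vport_wwpn/target_wwpn identify the
                     * next entry, or lun == NO_MORE_OAS_LUN at the end.
                     */
            }
    }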
+
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun has been created.
+ * 2) If found, sets the OAS enabled flag if not set and returns.
+ * 3) Otherwise, creates a device data structure.
+ * 4) If successfully created, indicates the device data is for an OAS lun,
+ * indicates the lun is not available, and adds it to the list of luns.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the device data for the lun has been created */
+ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ if (!lun_info->oas_enabled)
+ lun_info->oas_enabled = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ /* Create an lun info structure and add to list of luns */
+ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+ false);
+ if (lun_info) {
+ lun_info->oas_enabled = true;
+ lun_info->available = false;
+ list_add_tail(&lun_info->listentry, &phba->luns);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun is created.
+ * 2) If present, clears the flag indicating this lun is for OAS.
+ * 3) If the lun is not available to the system, the device data is
+ * freed.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Look up the device data for the lun. */
+ lun_info = __lpfc_get_device_data(phba,
+ &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ lun_info->oas_enabled = false;
+ if (!lun_info->available)
+ lpfc_delete_device_data(phba, lun_info);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
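And a correspondingly small sketch of toggling OAS on a single lun (the wrapper name is hypothetical; in real use the wwpn values would come from the vport and rport):

    /* Illustrative only: turn OAS on for a lun, then back off. */
    static void sketch_toggle_oas(struct lpfc_hba *phba,
                                  struct lpfc_name *vport_wwpn,
                                  struct lpfc_name *target_wwpn,
                                  uint64_t lun)
    {
            if (!lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun))
                    return;  /* XLane disabled, or device data alloc failed */

            /* IOs to this lun now get LPFC_IO_OAS and the OAS wqe bits. */

            lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun);
    }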
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7def493..0120bfccf50b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
};
+struct lpfc_device_id {
+ struct lpfc_name vport_wwpn;
+ struct lpfc_name target_wwpn;
+ uint64_t lun;
+};
+
+struct lpfc_device_data {
+ struct list_head listentry;
+ struct lpfc_rport_data *rport_data;
+ struct lpfc_device_id device_id;
+ bool oas_enabled;
+ bool available;
+};
+
struct fcp_rsp {
uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
+
+#define FIND_FIRST_OAS_LUN 0
+#define NO_MORE_OAS_LUN -1
+#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fda443f..6bb51f8e3c1b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -792,7 +793,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
list_del(&rrq->list);
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
}
@@ -813,7 +816,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
if (!ndlp)
return 0;
- if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
return 1;
else
return 0;
@@ -863,7 +868,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
if (!piocb->vport)
BUG();
else
@@ -4971,12 +4980,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
LPFC_QUEUE_REARM);
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
+
+ if (phba->cfg_EnableXLane)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
if (phba->sli4_hba.hba_eq) {
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -8032,7 +8048,8 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
struct lpfc_vector_map_info *cpup;
int chann, cpu;
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
+     phba->cfg_fcp_io_channel > 1) {
cpu = smp_processor_id();
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8523,6 +8564,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
- &wqe))
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
return IOCB_ERROR;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return IOCB_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event on a completion queue; if not, an error is logged and
+ * the routine returns. Otherwise, it gets the corresponding completion queue,
+ * processes all the entries on it, rearms it, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The Flash Optimized Fabric ring events are handled
+ * in the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the EQ and CQ are mapped one-to-one, so the EQ index is equal to the
+ * CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the events on the FOF EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* MSI-X vector is dedicated; log the empty EQ and continue */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* Non MSI-X interrupt may be shared; report it as not handled */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
/**
* lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
@@ -12287,6 +12509,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
hba_handled |= true;
}
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -16544,7 +16773,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *piocbq = 0;
+ struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f2478706e..6f04080f4ea8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
+#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
+
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd1a89d..9b8cda866176 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 16
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM 1
+
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
uint32_t if_page_sz;
uint32_t rq_db_window;
uint32_t loopbk_scope;
+ uint32_t oas_supported;
uint32_t eq_pages_max;
uint32_t eqe_size;
uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
uint8_t lnk_no;
};
+#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
+ LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
- uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+ uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
+ struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+ /* Optimized Access Storage specific queues/structures */
+
+ struct lpfc_queue *oas_cq; /* OAS completion queue */
+ struct lpfc_queue *oas_wq; /* OAS Work queue */
+ struct lpfc_sli_ring *oas_ring;
+ uint64_t oas_next_lun;
+ uint8_t oas_next_tgt_wwpn[8];
+ uint8_t oas_next_vpt_wwpn[8];
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
@@ -589,6 +607,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_online_cpu;
uint16_t num_present_cpu;
+ uint16_t curr_disp_cpu;
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4e143b..e32cbec70324 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.43"
+#define LPFC_DRIVER_VERSION "8.3.45"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */