path: root/drivers/net/ethernet/qlogic/qed
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c        |  8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c           |  7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c          | 96
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h     |  1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c         | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h           |  3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c  | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c          | 57
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c           |  1
9 files changed, 142 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..245eb02d0c4e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -3039,10 +3039,10 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
- strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
- strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strscpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
+ sizeof(type_name));
+ strscpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
+ sizeof(mem_name));
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
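
A note on the hunk above: strncpy() bounded by strlen(src) is a misuse twice over. The bound is taken from the source rather than the destination, so nothing prevents an overflow, and because the bound equals the source length, strncpy() never writes the terminating NUL. strscpy() bounds by the destination and always terminates. A minimal sketch of the difference, using a hypothetical 8-byte buffer rather than the driver's type_name/mem_name arrays:

	char buf[8];
	ssize_t len;

	/* Bounded by the SOURCE length: can overflow buf, and never
	 * writes the terminating NUL even when the string would fit.
	 */
	strncpy(buf, "0123456789", strlen("0123456789"));

	/* Bounded by the DESTINATION size: cannot overflow, always
	 * NUL-terminates, and returns -E2BIG on truncation.
	 */
	len = strscpy(buf, "0123456789", sizeof(buf));
	if (len == -E2BIG)
		pr_debug("truncated: buf now holds \"0123456\"\n");
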
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 6e4fae9b1430..944749cfe092 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -34,6 +34,7 @@ struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct qed_ptt_pool {
@@ -55,6 +56,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
@@ -169,6 +171,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
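
A note on the qed_hw.c change: each PTT window now records the hwfn that allocated it, and qed_set_ptt() reports cross-function use without failing the access. The same owner-tagging diagnostic in generic form, as a sketch with hypothetical names:

	struct pooled_res {
		u8 owner_id;		/* recorded once, at pool init */
	};

	static void res_warn_foreign_use(struct pooled_res *res, u8 user_id)
	{
		/* Diagnostic only: log the mismatch but let the access
		 * proceed, mirroring the DP_NOTICE() in qed_set_ptt().
		 */
		if (res->owner_id != user_id)
			pr_notice("res of owner[%02x] used by [%02x]!\n",
				  res->owner_id, user_id);
	}
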
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index eaa242df4131..e175fcd73739 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -97,18 +97,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define QED_MCP_SHMEM_RDY_ITER_MS 50
+
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
- return 0;
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not configured\n");
+ return -EINVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb,
+ sup_msgs));
+
+ /* The driver can notify that there was an MCP reset, and might read the
+ * SHMEM values before the MFW has completed initializing them.
+ * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+ * data ready indication.
+ */
+ while (!p_info->mfw_mb_length && --cnt) {
+ msleep(msec);
+ p_info->mfw_mb_length =
+ (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb, sup_msgs));
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return -EBUSY;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -118,13 +157,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
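
A note on the hunk above: an unconfigured scratch-pad address is now a hard error, and the "sup_msgs" field of the MFW mailbox is polled as a SHMEM-ready indication, since a driver-initiated MCP reset can race with the MFW re-initializing SHMEM. The bounded-poll pattern as a self-contained sketch, with a hypothetical wait_for_shmem_ready() helper (20 retries x 50 msec, the same 1-sec budget as above):

	#define RDY_MAX_RETRIES	20
	#define RDY_ITER_MS	50

	static int wait_for_shmem_ready(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 addr, u16 *sup_msgs)
	{
		u8 cnt;

		for (cnt = 0; cnt < RDY_MAX_RETRIES; cnt++) {
			*sup_msgs = (u16)qed_rd(p_hwfn, p_ptt, addr);
			if (*sup_msgs)
				return 0;	/* MFW finished writing SHMEM */
			msleep(RDY_ITER_MS);
		}

		return -EBUSY;			/* never became ready */
	}
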
@@ -1198,31 +1230,61 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS 10
+#define QED_MCP_HALT_MAX_RETRIES 10
+
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 resp = 0, param = 0;
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc)
+ if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ msleep(QED_MCP_HALT_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ return 0;
}
+#define QED_MCP_RESUME_SLEEP_MS 10
+
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ return 0;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
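
A note on the two hunks above: both qed_mcp_halt() and qed_mcp_resume() now verify the command took effect by reading MCP_REG_CPU_STATE rather than trusting the write. The shared write-then-verify shape could be factored into one helper; a sketch under the assumption that a fixed sleep between reads is acceptable:

	static int qed_mcp_poll_cpu_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 bit, bool set,
					  u32 sleep_ms, u32 max_retries)
	{
		u32 cpu_state, cnt;

		for (cnt = 0; cnt < max_retries; cnt++) {
			msleep(sleep_ms);
			cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
			if (!!(cpu_state & bit) == set)
				return 0;
		}

		return -EBUSY;
	}

	/* halt:   qed_mcp_poll_cpu_state(..., MCP_REG_CPU_STATE_SOFT_HALTED,
	 *				   true, QED_MCP_HALT_SLEEP_MS,
	 *				   QED_MCP_HALT_MAX_RETRIES);
	 */
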
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b414a0542177..56be1d6adfcc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -510,6 +510,7 @@
0
#define MCP_REG_CPU_STATE \
0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
#define PGLUE_B_REG_PF_BAR0_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9dcb0d1714c..07783d13df71 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -1059,23 +1059,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
- enum roce_flavor flavor;
-
switch (roce_mode) {
case ROCE_V1:
- flavor = PLAIN_ROCE;
- break;
+ return PLAIN_ROCE;
case ROCE_V2_IPV4:
- flavor = RROCE_IPV4;
- break;
+ return RROCE_IPV4;
case ROCE_V2_IPV6:
- flavor = ROCE_V2_IPV6;
- break;
+ return RROCE_IPV6;
default:
- flavor = MAX_ROCE_MODE;
- break;
+ return MAX_ROCE_FLAVOR;
}
- return flavor;
}
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
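
A note on the qed_roce.c hunk: besides flattening the switch into direct returns, it fixes two wrong constants. ROCE_V2_IPV6 (an enum roce_mode value) was returned where RROCE_IPV6 (an enum roce_flavor value) was meant, and the default case returned MAX_ROCE_MODE instead of MAX_ROCE_FLAVOR; C converts between enum types silently, so both compiled cleanly. One way to let the compiler see the intended type of every entry is a lookup table typed with the target enum. A sketch, assuming the roce_mode values are small and contiguous (this is not the form the patch adopts):

	static const enum roce_flavor roce_mode_flavor_map[] = {
		[ROCE_V1]      = PLAIN_ROCE,
		[ROCE_V2_IPV4] = RROCE_IPV4,
		[ROCE_V2_IPV6] = RROCE_IPV6,
	};

	static enum roce_flavor roce_mode_to_flavor(enum roce_mode mode)
	{
		if (mode >= ARRAY_SIZE(roce_mode_flavor_map))
			return MAX_ROCE_FLAVOR;
		return roce_mode_flavor_map[mode];
	}
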
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index b2c08e4d2a9b..bae7b7f9b1cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -132,6 +132,9 @@ struct qed_spq_entry {
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+
+ /* Posted entry for unlimited list entry in EBLOCK mode */
+ struct qed_spq_entry *post_ent;
};
struct qed_eq {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 2888eb0628f8..ac69ff3f7c5c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -56,7 +56,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
case QED_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
- return -EINVAL;
+ goto err;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
@@ -71,7 +71,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
- return -EINVAL;
+ goto err;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -85,6 +85,18 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
+
+err:
+ /* qed_spq_get_entry() can either get an entry from the free_pool,
+ * or, if no entries are left, allocate a new entry and add it to
+ * the unlimited_pending list.
+ */
+ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+ kfree(p_ent);
+ else
+ qed_spq_return_entry(p_hwfn, p_ent);
+
+ return -EINVAL;
}
static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
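
A note on the err label above: it encodes the SPQ's release rule. Entries handed out from the pre-allocated pool must go back to the pool via qed_spq_return_entry(), while entries kmalloc'ed for the unlimited_pending list are simply freed. Factored out, the rule would look like this hypothetical helper:

	static void qed_spq_release_entry(struct qed_hwfn *p_hwfn,
					  struct qed_spq_entry *p_ent)
	{
		if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
			kfree(p_ent);	/* ad-hoc allocation, no pool slot */
		else
			qed_spq_return_entry(p_hwfn, p_ent);
	}
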
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 9fbaf9429fd0..80c8c7f0d932 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -595,6 +595,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
+ else
+ p_ent->post_ent = p_en2;
p_ent = p_en2;
}
@@ -678,6 +680,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ __set_bit(pos, p_spq->p_comp_bitmap);
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+}
+
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
@@ -728,11 +749,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
if (p_ent->queue == &p_spq->unlimited_pending) {
- /* This is an allocated p_ent which does not need to
- * return to pool.
- */
+ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
kfree(p_ent);
- return rc;
+
+ /* Return the entry which was actually posted */
+ p_ent = p_post_ent;
}
if (rc)
@@ -746,7 +768,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
spq_post_fail:
/* return to the free pool */
@@ -778,25 +800,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
list_del(&p_ent->list);
-
- /* Avoid overriding of SPQ entries when getting
- * out-of-order completions, by marking the completions
- * in a bitmap and increasing the chain consumer only
- * for the first successive completed entries.
- */
- __set_bit(pos, p_spq->p_comp_bitmap);
-
- while (test_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap)) {
- __clear_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap);
- p_spq->comp_bitmap_idx++;
- qed_chain_return_produced(&p_spq->chain);
- }
-
+ qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent;
break;
@@ -835,11 +840,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
- (found->queue == &p_spq->unlimited_pending))
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for returning its own entry into the
- * free list, unless it originally added the entry into the
- * unlimited pending list.
+ * free list.
*/
qed_spq_return_entry(p_hwfn, found);
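
A note on the qed_spq.c changes: the newly factored qed_spq_comp_bmap_update() is the heart of the out-of-order handling. Each completion marks its ring position in a bitmap, but ring credit is returned only across the contiguous run of completed positions starting at the tracked index. A self-contained model of the technique, with hypothetical names and a caller-visible credit count:

	#define RING_SIZE 128

	struct comp_tracker {
		unsigned long bmap[BITS_TO_LONGS(RING_SIZE)];
		u16 next;	/* oldest position not yet credited */
	};

	static u16 comp_tracker_update(struct comp_tracker *t, u16 echo)
	{
		u16 credited = 0;

		__set_bit(echo % RING_SIZE, t->bmap);

		/* A completion arriving ahead of its predecessors stays
		 * parked in the bitmap until the gap closes.
		 */
		while (test_bit(t->next % RING_SIZE, t->bmap)) {
			__clear_bit(t->next % RING_SIZE, t->bmap);
			t->next++;
			credited++;
		}

		return credited;	/* ring elements safe to reuse */
	}
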
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index faf8215872de..9cc02b94328a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -295,7 +295,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
if (!p_iov->b_pre_fp_hsi &&
- ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",