Diffstat (limited to 'drivers/net/ethernet/brocade')
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c      | 650
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h      |   8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c   |  40
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi.h          |  33
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_enet.h     |   3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna.h          |  24
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c     |  58
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw_defs.h  |   4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c    | 251
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h    |  57
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c         | 559
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h         |  26
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c |   1
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h          |   4
14 files changed, 1422 insertions(+), 296 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..537bba14f913 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
/* IOC local definitions */
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -42,6 +50,14 @@
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
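
These four wrappers route every firmware-state register access through the per-ASIC ioc_hwif callbacks added later in this patch, rather than open-coded readl()/writel() on ioc_fwstate/alt_ioc_fwstate. A minimal caller sketch (hypothetical, mirroring how the macros are used below):

	if (bfa_ioc_get_cur_ioc_fwstate(ioc) == BFI_IOC_UNINIT) {
		/* the same call works for CT and CT2; the hwif op decides
		 * which register actually backs the firmware state
		 */
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
	}
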
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
- u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+ enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1131,6 +1147,25 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
writel(1, sem_reg);
}
+/* Invalidate fwver signature */
+enum bfa_status
+bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ enum bfi_ioc_state ioc_fwstate;
+
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+ if (!bfa_ioc_state_disabled(ioc_fwstate))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ writel(BFI_IOC_FW_INV_SIGN, ioc->ioc_regs.smem_page_start + loff);
+ return BFA_STATUS_OK;
+}
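
bfa_nw_ioc_fwsig_invalidate() refuses to touch shared memory while the IOC is still running (any state outside bfa_ioc_state_disabled()), then overwrites the signature word at smem offset 0 with BFI_IOC_FW_INV_SIGN so the next header check fails and a fresh download is forced. A minimal caller sketch (hypothetical, not from this patch):

	if (bfa_nw_ioc_fwsig_invalidate(ioc) == BFA_STATUS_ADAPTER_ENABLED) {
		/* IOC still enabled; the signature was left intact, so the
		 * caller must disable the IOC first and retry
		 */
	}
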
+
/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
@@ -1162,7 +1197,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
}
- fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return;
@@ -1176,8 +1211,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
}
bfa_ioc_fwver_clear(ioc);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
/*
* Try to lock and then unlock the semaphore.
@@ -1309,22 +1344,510 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+ struct bfi_ioc_image_hdr *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns TRUE if the major, minor and maintenance versions are the same.
+ * If the patch versions are also the same, check that the MD5 checksums match.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return false;
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return false;
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return false;
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return false;
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+ return true;
+}
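
A worked example of the compatibility rule above (values are hypothetical): headers for 3.2.23.0 and 3.2.23.1 share signature, major, minor and maint and differ only in patch, so they are reported compatible without consulting the MD5 sums; the MD5 check only applies when patch, phase and build all match.

	struct bfi_ioc_image_hdr a = {
		.signature = BFI_IOC_FW_SIGNATURE,
		.fwver = { .major = 3, .minor = 2, .maint = 23, .patch = 0 },
	};
	struct bfi_ioc_image_hdr b = a;

	b.fwver.patch = 1;
	/* bfa_ioc_fw_ver_compatible(&a, &b) == true, no MD5 comparison made */
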
+
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return false;
+
+ return true;
+}
+
+/* Compares the two headers and reports whether fwhdr_to_cmp is incompatible,
+ * older, the same as, or better than base_fwhdr.
+ */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* GA takes priority over internal builds of the same patch stream.
+ * At this point the major, minor, maint and patch numbers are the same.
+ */
+ if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ else
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* All Version Numbers are equal.
+ * Md5 check to be done as a part of compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
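
Tie-breaking order implemented by bfa_ioc_fw_ver_patch_cmp(), restated for reference: incompatible headers short-circuit to INCOMP; otherwise patch decides, then the fwhdr_is_ga() test (which, as written, returns false when phase and build are both zero), then phase, then build; full equality is reported as SAME, the MD5 sums having already been matched by the compatibility check at the top. A hypothetical caller:

	switch (bfa_ioc_fw_ver_patch_cmp(base_fwhdr, fwhdr_to_cmp)) {
	case BFI_IOC_IMG_VER_BETTER:
		/* prefer fwhdr_to_cmp's image */
		break;
	case BFI_IOC_IMG_VER_SAME:
	case BFI_IOC_IMG_VER_OLD:
	case BFI_IOC_IMG_VER_INCOMP:
		/* keep the base image */
		break;
	}
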
+
+/* register definitions */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_WRDATA_REG 0x0001d00c
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+#define NFC_STATE_RUNNING 0x20000001
+#define NFC_STATE_PAUSED 0x00004560
+#define NFC_VER_VALID 0x147
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
+ BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
+ BFA_FLASH_WRITE = 0x02, /* write */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /* Check the device status. It may take some time. */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+ /* len must be a multiple of 4 and must not exceed the fifo size */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /* check status */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /* check if write-in-progress bit is cleared */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+ u32 i;
+
+ /* read data fifo up to 32 words */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *)(buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+ return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ udelay(10000);
+ }
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
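
A worked example of the chunking arithmetic in bfa_flash_raw_read() (offset and length are hypothetical): each pass reads only up to the next 128-byte FIFO boundary, so every transfer stays within one FLI FIFO window as bfa_flash_read_start() requires.

	/* offset = 0x100010, len = 300, fifo_sz = 128
	 *   pass 1: s = 0x100010, n = s / 128, l = (n + 1) * 128 - s = 112
	 *   pass 2: l = 128
	 *   pass 3: l = residue = 60
	 * 112 + 128 + 60 = 300 bytes read, each length a multiple of 4.
	 */
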
+
+u32
+bfa_nw_ioc_flash_img_get_size(struct bfa_ioc *ioc)
+{
+ return BFI_FLASH_IMAGE_SZ/sizeof(u32);
+}
+
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr *flash_fwhdr;
+ enum bfa_status status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if the driver is willing to work with the current smem
+ * firmware version.
+ */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
struct bfi_ioc_image_hdr *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
- return false;
+ /* If smem is incompatible or old, driver should not work with it. */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return false;
}
- return true;
+ /* If flash has a better f/w than smem, do not work with smem.
+ * If smem f/w == flash f/w, work with it (smem f/w is neither old nor
+ * incompatible at this point).
+ * If flash f/w is old or incompatible, work with smem only if
+ * smem f/w == drv f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+ return false;
+ else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+ return true;
+ else
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ true : false;
}
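
The decision made by bfa_nw_ioc_fwver_cmp() can be summarized as follows (a restatement of the logic above, not new behaviour):

	/*   drv vs smem  == OLD or INCOMP -> false (re-download firmware)
	 *   flash vs smem == BETTER       -> false (boot the flash image instead)
	 *   flash vs smem == SAME         -> true  (keep the running smem image)
	 *   flash OLD or INCOMP           -> true only when smem f/w == drv f/w
	 */
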
/* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1856,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr fwhdr;
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != drv_fwhdr->signature)
- return false;
-
if (swab32(fwhdr.bootenv) != boot_env)
return false;
@@ -1366,7 +1883,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
bool fwvalid;
u32 boot_env;
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1897,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+
return;
}
@@ -1411,8 +1930,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1517,7 +2037,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
}
/* Initiate a full firmware download. */
-static void
+static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1527,18 +2047,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ enum bfa_status status;
+
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/**
@@ -1566,6 +2115,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2127,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_FWBOOT_ENV_OFF)));
+ return BFA_STATUS_OK;
}
static void
@@ -1846,29 +2400,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-static void
+static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_env)
{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ enum bfa_status status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+ /* Work with flash only if its f/w is better than the driver's f/w.
+ * Otherwise push the driver's firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/**
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
} else {
- writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else
+ bfa_nw_iocpf_timeout(ioc);
+
+ return status;
}
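
For a normal OS boot, bfa_ioc_boot() now compares the driver's bundled image against the flash partition and switches boot_type to BFI_FWBOOT_TYPE_FLASH only when flash is strictly better; a failed download is reported through bfa_nw_iocpf_timeout() instead of being ignored. A condensed summary of the resulting chunk source (restated, no new behaviour):

	/* Chunk source chosen inside bfa_ioc_download_fw():
	 *   BFI_FWBOOT_TYPE_FLASH  -> bfa_nw_ioc_flash_img_get_chnk()
	 *                             (flash partition at BFA_FLASH_PART_FWIMG_ADDR)
	 *   BFI_FWBOOT_TYPE_NORMAL -> bfa_cb_image_get_chunk() (driver image)
	 * Before the boot parameters are written to smem, FLASH is rewritten
	 * back to NORMAL, so the firmware itself sees an ordinary boot.
	 */
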
/* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3045,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
- u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
return false;
}
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+ u8 patch;
+ u8 maint;
+ u8 minor;
+ u8 major;
+ u8 rsvd[2];
+ u8 build;
+ u8 phase;
+#else
+ u8 major;
+ u8 minor;
+ u8 maint;
+ u8 patch;
+ u8 phase;
+ u8 build;
+ u8 rsvd[2];
+#endif
+};
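
A hypothetical example of how a release such as 3.2.23.0 would populate the new version block (designated initializers keep this endian-safe):

	struct bfi_ioc_fwver example = {
		.major = 3, .minor = 2, .maint = 23, .patch = 0,
		.phase = 0, .build = 0,	/* phase/build distinguish internal builds */
	};
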
+
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
u8 asic_gen; /*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
u8 port1_mode; /*!< device mode for port 1 */
u32 exec; /*!< exec vector */
u32 bootenv; /*!< firmware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
struct bfi_enet_rx_cfg {
u8 rxq_type;
- u8 rsvd[3];
+ u8 rsvd[1];
+ u16 frame_size;
struct {
u8 max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
} \
} while (0)
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&ucam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+ }
+
ucam_mod->bna = bna;
}
@@ -1818,11 +1825,16 @@ static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
- int i = 0;
+ int i;
+ i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
+ i = 0;
+ list_for_each(qe, &ucam_mod->del_q)
+ i++;
+
ucam_mod->bna = NULL;
}
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
&mcam_mod->free_handle_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&mcam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+ }
+
mcam_mod->bna = bna;
}
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
+ list_for_each(qe, &mcam_mod->del_q) i++;
+
+ i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- attr->num_ucmac * sizeof(struct bna_mac);
+ (attr->num_ucmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast MAC address - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- attr->num_mcmac * sizeof(struct bna_mac);
+ (attr->num_mcmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast handle - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
}
struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
- if (list_empty(&mcam_mod->free_q))
+ if (list_empty(head))
return NULL;
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
+ bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
+ list_add_tail(&mac->qe, tail);
}
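
The ucam/mcam-specific get/put helpers collapse into one pair that operates on whichever list the caller passes, typically through the bna_ucam_mod_free_q()/del_q() macros added in bna.h. A minimal usage sketch (hypothetical):

	struct bna_mac *mac;

	mac = bna_cam_mod_mac_get(bna_ucam_mod_free_q(bna));
	if (mac) {
		/* ... fill in mac->addr ... */
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(bna), mac);
	}
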
struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
+/* CAT2 ASIC does not use bit 21 as per the SPEC.
+ * Bit 31 is set in every end-of-frame completion.
+ */
+#define BNA_CQ_EF_EOP (1 << 31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
struct list_head *qe;
int ret;
- /* Delete multicast entries previousely added */
+ /* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_pending_mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+ rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
+ struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
- struct bna_mac *mac;
+ struct bna_mac *mac, *del_mac;
int i;
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ for (i = 0, mcaddr = uclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
-
mcaddr += ETH_ALEN;
}
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ return BNA_CB_UCAST_CAM_FULL;
+}
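
The new del_q exists so that list replacement can be done synchronously: each active MAC is copied into a node taken from del_q and queued on pending_del_q, while the original node goes straight back to free_q, so free_q always has enough nodes to build the replacement list. This is also why the ucmac/mcmac arrays are now sized num_ucmac * 2 and num_mcmac * 2. The flow of one listset call, restated:

	/*   pending_add_q entry -> free_q                   (drop stale adds)
	 *   active_q entry      -> copy into a del_q node   -> pending_del_q
	 *                          original node            -> free_q
	 *   new list            -> nodes taken from free_q  -> pending_add_q
	 */
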
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac, *del_mac;
+ int i;
+
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ }
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
}
/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac, *del_mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+}
+
+void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
return 1;
}
}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+ cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
- q0->buffer_size =
- bna_enet_mtu_get(&rx->bna->enet);
+ if (q0->multi_buffer)
+ /* multi-buffer is enabled by allocating
+ * a new rx with a new set of resources.
+ * q0->buffer_size should be initialized to
+ * the fragment size.
+ */
+ cfg_req->rx_cfg.multi_buffer =
+ BNA_STATUS_T_ENABLED;
+ else
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ dq_depth = q_cfg->q0_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
- u32 page_count;
+ struct bna_mem_descr *hqunmap_mem;
+ struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i;
- int dpage_count, hpage_count, rcb_idx;
+ u32 dpage_count, hpage_count;
+ u32 hq_idx, dq_idx, rcb_idx;
+ u32 cq_depth, i;
+ u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+ hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}
rx->num_paths = rx_cfg->num_paths;
- for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+ i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+ rcb_idx++; dq_idx++;
+ q0->rcb->q_depth = rx_cfg->q0_depth;
+ q0->q_depth = rx_cfg->q0_depth;
+ q0->multi_buffer = rx_cfg->q0_multi_buf;
+ q0->buffer_size = rx_cfg->q0_buf_size;
+ q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+ rcb_idx++; hq_idx++;
+ q1->rcb->q_depth = rx_cfg->q1_depth;
+ q1->q_depth = rx_cfg->q1_depth;
+ q1->multi_buffer = BNA_STATUS_T_DISABLED;
+ q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
- : rx_cfg->small_buff_size;
+ : rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
+ cq_depth = rx_cfg->q0_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q1_depth);
+ /* if multi-buffer is enabled sum of q0_depth
+ * and q1_depth need not be a power of 2
+ */
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_IBIDX = 12,
- BNA_RX_RES_MEM_T_RIT = 13,
- BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 15
+ BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+ BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+ BNA_RX_RES_MEM_T_CQPT = 4,
+ BNA_RX_RES_MEM_T_CSWQPT = 5,
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+ BNA_RX_RES_MEM_T_HQPT = 7,
+ BNA_RX_RES_MEM_T_DQPT = 8,
+ BNA_RX_RES_MEM_T_HSWQPT = 9,
+ BNA_RX_RES_MEM_T_DSWQPT = 10,
+ BNA_RX_RES_MEM_T_DPAGE = 11,
+ BNA_RX_RES_MEM_T_HPAGE = 12,
+ BNA_RX_RES_MEM_T_IBIDX = 13,
+ BNA_RX_RES_MEM_T_RIT = 14,
+ BNA_RX_RES_T_INTR = 15,
+ BNA_RX_RES_T_MAX = 16
};
enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
int buffer_size;
int q_depth;
+ u32 num_vecs;
+ enum bna_status multi_buffer;
struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
+ u32 pkts_una;
+ u32 bytes_per_intr;
/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
- int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * enet->mtu.
+ * for SLR and HDS queue type.
*/
- int small_buff_size;
+ u32 frame_size;
+
+ /* header or small queue */
+ u32 q1_depth;
+ u32 q1_buf_size;
+
+ /* data or large queue */
+ u32 q0_depth;
+ u32 q0_buf_size;
+ u32 q0_num_vecs;
+ enum bna_status q0_multi_buf;
enum bna_status rss_status;
struct bna_rss_config rss_config;
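
A hypothetical bna_rx_config using the new per-queue fields, roughly how a multi-buffer jumbo-frame receive path might be described (the values are illustrative, not taken from this patch):

	struct bna_rx_config rx_cfg = {
		.rxp_type     = BNA_RXP_SLR,
		.num_paths    = 1,
		.frame_size   = 9000,			/* jumbo MTU */
		.q0_depth     = 2048,
		.q0_buf_size  = 2048,			/* per-fragment size */
		.q0_num_vecs  = 5,			/* ~frame_size / q0_buf_size */
		.q0_multi_buf = BNA_STATUS_T_ENABLED,
		.q1_depth     = 512,
		.q1_buf_size  = 2048,
	};
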
@@ -866,8 +879,9 @@ struct bna_rx_mod {
/* CAM */
struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct bna_mac *ucmac; /* num_ucmac * 2 entries */
struct list_head free_q;
+ struct list_head del_q;
struct bna *bna;
};
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
};
struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct bna_mac *mcmac; /* num_mcmac * 2 entries */
+ struct bna_mcam_handle *mchandle; /* num_mcmac entries */
struct list_head free_q;
+ struct list_head del_q;
struct list_head free_handle_q;
struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..5f24a9ffcfaa 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
/*
* Global variables
*/
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vectors[vector], dma_addr),
- skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_len(&unmap->vectors[vector], dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
nvecs--;
}
@@ -282,27 +283,32 @@ static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
- int mtu, order;
+ int order;
bnad_rxq_alloc_uninit(bnad, rcb);
- mtu = bna_enet_mtu_get(&bnad->bna.enet);
- order = get_order(mtu);
+ order = get_order(rcb->rxq->buffer_size);
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
if (bna_is_small_rxq(rcb->id)) {
unmap_q->alloc_order = 0;
unmap_q->map_size = rcb->rxq->buffer_size;
} else {
- unmap_q->alloc_order = order;
- unmap_q->map_size =
- (rcb->rxq->buffer_size > 2048) ?
- PAGE_SIZE << order : 2048;
+ if (rcb->rxq->multi_buffer) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
}
BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
- unmap_q->type = BNAD_RXBUF_PAGE;
-
return 0;
}
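
Buffer sizing now keys off rcb->rxq->buffer_size, set per queue by bna_rx_create(), rather than the enet MTU, and multi-buffer queues use order-0, per-fragment mappings. A worked example (hypothetical sizes, PAGE_SIZE = 4096):

	/* single-buffer large RxQ, buffer_size = 9000:
	 *   order = get_order(9000) = 2, map_size = PAGE_SIZE << 2 = 16384
	 * multi-buffer large RxQ, buffer_size = 2048 (fragment size):
	 *   alloc_order = 0, map_size = 2048, type = BNAD_RXBUF_MULTI_BUFF
	 * small RxQ: alloc_order = 0, map_size = buffer_size
	 */
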
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
for (i = 0; i < rcb->q_depth; i++) {
struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
}
bnad_rxq_alloc_uninit(bnad, rcb);
}
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
return;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_refill_page(bnad, rcb, to_alloc);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
- struct bnad_rx_unmap_q *unmap_q,
- struct bnad_rx_unmap *unmap,
- u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+ u32 sop_ci, u32 nvecs)
{
- struct bnad *bnad = rx_ctrl->bnad;
- struct sk_buff *skb;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+ u32 ci, vec;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
- skb = napi_get_frags(&rx_ctrl->napi);
- if (unlikely(!skb))
- return NULL;
+ unmap_q = rcb->unmap_q;
+ for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
+ }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+ u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+ struct bnad *bnad;
+ u32 ci, vec, len, totlen = 0;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+
+ unmap_q = rcb->unmap_q;
+ bnad = rcb->bnad;
+
+ /* prefetch header */
+ prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+ unmap_q->unmap[sop_ci].page_offset);
+
+ for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
+
+ len = (vec == nvecs) ?
+ last_fraglen : unmap->vector.len;
+ totlen += len;
+
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- unmap->page, unmap->page_offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
-
- return skb;
}
- skb = unmap->skb;
- BUG_ON(!skb);
+ skb->len += totlen;
+ skb->data_len += totlen;
+ skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+ struct bnad_rx_unmap *unmap, u32 len)
+{
+ prefetch(skb->data);
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
- skb_put(skb, length);
-
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, bnad->netdev);
unmap->skb = NULL;
unmap->vector.len = 0;
- return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cq, *cmpl;
+ struct bna_cq_entry *cq, *cmpl, *next_cmpl;
struct bna_rcb *rcb = NULL;
struct bnad_rx_unmap_q *unmap_q;
- struct bnad_rx_unmap *unmap;
- struct sk_buff *skb;
+ struct bnad_rx_unmap *unmap = NULL;
+ struct sk_buff *skb = NULL;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
- u32 packets = 0, length = 0, flags, masked_flags;
+ u32 packets = 0, len = 0, totlen = 0;
+ u32 pi, vec, sop_ci = 0, nvecs = 0;
+ u32 flags, masked_flags;
prefetch(bnad->netdev);
cq = ccb->sw_q;
cmpl = &cq[ccb->producer_index];
- while (cmpl->valid && (packets < budget)) {
- packets++;
- flags = ntohl(cmpl->flags);
- length = ntohs(cmpl->length);
+ while (packets < budget) {
+ if (!cmpl->valid)
+ break;
+		/* The adapter sets the 'valid' field only after writing the
+		 * other fields of the completion entry. Hence, do not load
+		 * those other fields *before* 'valid' has been loaded. The
+		 * rmb() here prevents the compiler and/or CPU from reordering
+		 * the reads, which could otherwise pick up stale values from
+		 * the completion entry.
+		 */
+ rmb();
+
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap = &unmap_q->unmap[rcb->consumer_index];
- if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
- BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
- bnad_rxq_cleanup_skb(bnad, unmap);
+ /* start of packet ci */
+ sop_ci = rcb->consumer_index;
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+ unmap = &unmap_q->unmap[sop_ci];
+ skb = unmap->skb;
+ } else {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ break;
+ }
+ prefetch(skb);
+
+ flags = ntohl(cmpl->flags);
+ len = ntohs(cmpl->length);
+ totlen = len;
+ nvecs = 1;
+		/* Walk all the completions of this frame. If a later
+		 * completion has not been posted yet, busy-waiting does not
+		 * help much, so break out below.
+		 */
+ if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+ (flags & BNA_CQ_EF_EOP) == 0) {
+ pi = ccb->producer_index;
+ do {
+ BNA_QE_INDX_INC(pi, ccb->q_depth);
+ next_cmpl = &cq[pi];
+
+ if (!next_cmpl->valid)
+ break;
+				/* The adapter sets the 'valid' field only
+				 * after writing the other fields of the
+				 * completion entry. Hence, do not load those
+				 * other fields *before* 'valid' has been
+				 * loaded. The rmb() here prevents the
+				 * compiler and/or CPU from reordering the
+				 * reads, which could otherwise pick up stale
+				 * values from the completion entry.
+				 */
+ rmb();
+
+ len = ntohs(next_cmpl->length);
+ flags = ntohl(next_cmpl->flags);
+
+ nvecs++;
+ totlen += len;
+ } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+ if (!next_cmpl->valid)
+ break;
+ }
+
+ /* TODO: BNA_CQ_EF_LOCAL ? */
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
rcb->rxq->rx_packets_with_error++;
+
goto next;
}
- skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
- length, flags);
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_cq_setup_skb(bnad, skb, unmap, len);
+ else
+ bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- if (unlikely(!skb))
- break;
+ packets++;
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += totlen;
+ ccb->bytes_per_intr += totlen;
masked_flags = flags & flags_cksum_prot_mask;
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += length;
-
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- napi_gro_frags(&rx_ctrl->napi);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
netif_receive_skb(skb);
+ else
+ napi_gro_frags(&rx_ctrl->napi);
next:
- cmpl->valid = 0;
- BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
- BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+ for (vec = 0; vec < nvecs; vec++) {
+ cmpl = &cq[ccb->producer_index];
+ cmpl->valid = 0;
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ }
cmpl = &cq[ccb->producer_index];
}
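
Both rmb() calls in this function guard the same publish/consume pattern: the adapter writes the payload of a completion entry first and its 'valid' flag last, so the driver must load 'valid' before any payload field. A stripped-down sketch of the consumer side under that assumption; the struct, field and function names are illustrative:

#include <linux/types.h>
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* rmb() */
#include <asm/byteorder.h>	/* be16_to_cpu() */

struct ex_cmpl {
	u8	valid;		/* written last by the device */
	__be16	length;		/* written before 'valid' */
};

/* Returns the frame length, or 0 if no completion has been posted yet. */
static u16 ex_poll_cmpl(struct ex_cmpl *c)
{
	if (!READ_ONCE(c->valid))
		return 0;
	rmb();	/* order the 'valid' load before the payload loads */
	return be16_to_cpu(c->length);
}

The matching producer-side rule is that the payload stores must be ordered before the store that sets 'valid'; here the adapter provides that ordering, while a software producer would need a wmb().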
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
tx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!tx)
+ if (!tx) {
+ err = -ENOMEM;
goto err_return;
+ }
tx_info->tx = tx;
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
err = bnad_tx_msix_register(bnad, tx_info,
tx_id, bnad->num_txq_per_tx);
if (err)
- goto err_return;
+ goto cleanup_tx;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
return 0;
+cleanup_tx:
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
err_return:
bnad_tx_res_free(bnad, res_info);
return err;
@@ -1930,6 +2039,7 @@ err_return:
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
+ memset(rx_config, 0, sizeof(*rx_config));
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
memset(&rx_config->rss_config, 0,
sizeof(rx_config->rss_config));
}
+
+ rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+ rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+	/* BNA_RXP_SINGLE - one data-buffer queue
+	 * BNA_RXP_SLR - one small-buffer queue and one large-buffer queue
+	 * BNA_RXP_HDS - one header-buffer queue and one data-buffer queue
+	 */
+ /* TODO: configurable param for queue type */
rx_config->rxp_type = BNA_RXP_SLR;
- rx_config->q_depth = bnad->rxq_depth;
- rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ rx_config->frame_size > 4096) {
+		/* Even though size_routing_enable is set in SLR mode,
+		 * small packets may still get routed to the same rxq;
+		 * set buf_size to 2048 instead of PAGE_SIZE.
+		 */
+ rx_config->q0_buf_size = 2048;
+		/* this should be a multiple of 2 */
+ rx_config->q0_num_vecs = 4;
+ rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+ rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+ } else {
+ rx_config->q0_buf_size = rx_config->frame_size;
+ rx_config->q0_num_vecs = 1;
+ rx_config->q0_depth = bnad->rxq_depth;
+ }
+
+	/* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+ if (rx_config->rxp_type == BNA_RXP_SLR) {
+ rx_config->q1_depth = bnad->rxq_depth;
+ rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+ }
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
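
The q0 sizing above comes down to one decision: on a CT2 (CAT2) device with a frame size above 4096 bytes, each frame is received into several fixed 2048-byte buffers (multi-buffer mode) and the queue depth is scaled by the per-frame vector count; otherwise a single buffer the size of the frame is used. A hedged sketch of that decision as a standalone helper; the ex_* names are not the driver's:

#include <linux/types.h>

struct ex_q0_cfg {
	u32 buf_size;
	u32 num_vecs;
	u32 depth;
	bool multi_buf;
};

/* Mirrors the q0 branch of bnad_init_rx_config() above, for illustration. */
static void ex_fill_q0_cfg(struct ex_q0_cfg *cfg, u32 frame_size,
			   u32 rxq_depth, bool is_cat2)
{
	if (is_cat2 && frame_size > 4096) {
		cfg->buf_size = 2048;			/* fixed small buffers */
		cfg->num_vecs = 4;			/* per the note above */
		cfg->depth = rxq_depth * cfg->num_vecs;
		cfg->multi_buf = true;
	} else {
		cfg->buf_size = frame_size;		/* one buffer per frame */
		cfg->num_vecs = 1;
		cfg->depth = rxq_depth;
		cfg->multi_buf = false;
	}
}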
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
+u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 err = 0, current_err = 0;
+ u32 rx_id = 0, count = 0;
+ unsigned long flags;
+
+ /* destroy and create new rx objects */
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ if (!bnad->rx_info[rx_id].rx)
+ continue;
+ bnad_destroy_rx(bnad, rx_id);
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ count++;
+ current_err = bnad_setup_rx(bnad, rx_id);
+ if (current_err && !err) {
+ err = current_err;
+ pr_err("RXQ:%u setup failed\n", rx_id);
+ }
+ }
+
+ /* restore rx configuration */
+ if (bnad->rx_info[0].rx && !err) {
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
+ }
+
+ return count;
+}
+
+/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
- rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_config->num_paths),
- ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
- sizeof(struct bnad_rx_unmap_q)));
-
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+ rx_config->num_paths,
+ (rx_config->q0_depth *
+ sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q));
+
+ if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+ rx_config->num_paths,
+ (rx_config->q1_depth *
+ sizeof(struct bnad_rx_unmap) +
+ sizeof(struct bnad_rx_unmap_q)));
+ }
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
if (err)
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev)
int err;
struct bnad *bnad = netdev_priv(netdev);
struct bna_pause_config pause_config;
- int mtu;
unsigned long flags;
mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev)
bnad_destroy_tx(bnad, 0);
bnad_destroy_rx(bnad, 0);
- /* These config flags are cleared in the hardware */
- bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
/* Synchronize mailbox IRQ */
bnad_mbox_irq_sync(bnad);
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
tcb = bnad->tx_info[0].tcb[txq_id];
- q_depth = tcb->q_depth;
- prod = tcb->producer_index;
-
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_tx_stop_all_queues() call.
*/
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+ unmap_q = tcb->unmap_q;
+
vectors = 1 + skb_shinfo(skb)->nr_frags;
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = skb_frag_size(frag);
+ u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
- dma_addr);
+ dma_addr);
head_unmap->nvecs++;
}
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
+ skb_tx_timestamp(skb);
+
bna_txq_prod_indx_doorbell(tcb);
smp_mb();
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int uc_count = netdev_uc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+ struct netdev_hw_addr *ha;
+ int entry;
+
+ if (netdev_uc_empty(bnad->netdev)) {
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ return;
+ }
+
+ if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+ goto mode_default;
+
+ mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+ if (mac_list == NULL)
+ goto mode_default;
+
+ entry = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ memcpy(&mac_list[entry * ETH_ALEN],
+ &ha->addr[0], ETH_ALEN);
+ entry++;
+ }
+
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_default;
+
+ return;
+
+	/* ucast packets not in UCAM are routed to the default function */
+mode_default:
+ bnad->cfg_flags |= BNAD_CF_DEFAULT;
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int mc_count = netdev_mc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ goto mode_allmulti;
+
+ if (netdev_mc_empty(netdev))
+ return;
+
+ if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+ goto mode_allmulti;
+
+ mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+ if (mac_list == NULL)
+ goto mode_allmulti;
+
+ memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+	/* copy the rest of the MCAST addresses */
+ bnad_netdev_mc_list_get(netdev, mac_list);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_allmulti;
+
+ return;
+
+mode_allmulti:
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u32 new_mask, valid_mask;
+ enum bna_rxmode new_mode, mode_mask;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- new_mask = valid_mask = 0;
-
- if (netdev->flags & IFF_PROMISC) {
- if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
- new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags |= BNAD_CF_PROMISC;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_PROMISC) {
- new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags &= ~BNAD_CF_PROMISC;
- }
- }
-
- if (netdev->flags & IFF_ALLMULTI) {
- if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
- new_mask |= BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
- new_mask &= ~BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
- }
+ if (bnad->rx_info[0].rx == NULL) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
}
- if (bnad->rx_info[0].rx == NULL)
- goto unlock;
+	/* clear the mode flags so they can be rebuilt from the new settings */
+ bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+ BNAD_CF_ALLMULTI);
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+ new_mode = 0;
+ if (netdev->flags & IFF_PROMISC) {
+ new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ } else {
+ bnad_set_rx_mcast_fltr(bnad);
- if (!netdev_mc_empty(netdev)) {
- u8 *mcaddr_list;
- int mc_count = netdev_mc_count(netdev);
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+ new_mode |= BNA_RXMODE_ALLMULTI;
- /* Index 0 holds the broadcast address */
- mcaddr_list =
- kzalloc((mc_count + 1) * ETH_ALEN,
- GFP_ATOMIC);
- if (!mcaddr_list)
- goto unlock;
+ bnad_set_rx_ucast_fltr(bnad);
- memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+ new_mode |= BNA_RXMODE_DEFAULT;
+ }
- /* Copy rest of the MC addresses */
- bnad_netdev_mc_list_get(netdev, mcaddr_list);
+ mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+ BNA_RXMODE_ALLMULTI;
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mcaddr_list, NULL);
+ if (bnad->cfg_flags & BNAD_CF_PROMISC)
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
- /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
- kfree(mcaddr_list);
- }
-unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
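
The rewritten receive-mode handling above has a clear precedence: IFF_PROMISC wins outright and also disables VLAN stripping; otherwise the multicast list is programmed, falling back to ALLMULTI when IFF_ALLMULTI is set or the list overflows the CAM, and the unicast list is programmed, falling back to DEFAULT mode when it overflows. A compressed sketch of that ordering; the enum and helper are illustrative only:

#include <linux/types.h>

enum ex_rxmode {
	EX_RXMODE_PROMISC	= 1,
	EX_RXMODE_ALLMULTI	= 2,
	EX_RXMODE_DEFAULT	= 4,
};

/* Summarizes the mode precedence implemented in bnad_set_rx_mode() above. */
static int ex_pick_rxmode(bool promisc, bool mc_overflow, bool uc_overflow)
{
	int mode = 0;

	if (promisc)
		return EX_RXMODE_PROMISC;   /* also disables VLAN stripping */

	if (mc_overflow)
		mode |= EX_RXMODE_ALLMULTI; /* too many mcast addrs for the CAM */
	if (uc_overflow)
		mode |= EX_RXMODE_DEFAULT;  /* extra ucast addrs use default fn */

	return mode;
}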
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
{
unsigned long flags;
init_completion(&bnad->bnad_completions.mtu_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
- int err, mtu = netdev->mtu;
+ int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
+ u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
+ mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
- err = bnad_mtu_set(bnad, mtu);
+ frame = BNAD_FRAME_SIZE(mtu);
+ new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+ /* check if multi-buffer needs to be enabled */
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ netif_running(bnad->netdev)) {
+		/* only when the frame size transition crosses 4K */
+ if ((frame <= 4096 && new_frame > 4096) ||
+ (frame > 4096 && new_frame <= 4096))
+ rx_count = bnad_reinit_rx(bnad);
+ }
+
+	/* rx_count > 0 - new rx created
+	 *		- Linux sets err = 0 and returns
+	 */
+ err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
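
The reinit above is needed only when the old and new frame sizes land on opposite sides of the 4096-byte boundary, because that is the point where the Rx path switches between single-buffer and multi-buffer layouts. A one-function sketch of the crossing test, assuming both values come from BNAD_FRAME_SIZE():

#include <linux/types.h>

/* True when the old and new frame sizes sit on opposite sides of 4K,
 * i.e. when the Rx buffer layout has to change.
 */
static bool ex_rx_needs_reinit(u32 old_frame, u32 new_frame)
{
	return (old_frame <= 4096 && new_frame > 4096) ||
	       (old_frame > 4096 && new_frame <= 4096);
}

Going from a 1500-byte to a 9000-byte MTU crosses the boundary, for example, while going from 8000 to 9000 does not.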
@@ -3262,7 +3524,6 @@ bnad_uninit(struct bnad *bnad)
if (bnad->bar0)
iounmap(bnad->bar0);
- pci_set_drvdata(bnad->pcidev, NULL);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.21.1"
+#define BNAD_VERSION "3.2.23.0"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
#define BNAD_MIN_Q_DEPTH 512
-#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_RXQ_DEPTH 16384
#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+#define BNAD_FRAME_SIZE(_mtu) \
+ (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
/*
* DATA STRUCTURES
*/
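
BNAD_FRAME_SIZE() simply adds the Ethernet header, one VLAN tag and the FCS to the MTU, i.e. MTU + 22 bytes. A compile-time sanity sketch of that arithmetic; EX_FRAME_SIZE mirrors the macro and is not part of the patch:

#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/if_ether.h>	/* ETH_HLEN (14), ETH_FCS_LEN (4) */
#include <linux/if_vlan.h>	/* VLAN_HLEN (4) */

#define EX_FRAME_SIZE(_mtu)	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)

static inline void ex_frame_size_checks(void)
{
	/* a 1500-byte MTU maps to the familiar 1522-byte VLAN-tagged frame */
	BUILD_BUG_ON(EX_FRAME_SIZE(1500) != 1522);
	/* a 9000-byte jumbo MTU maps to a 9022-byte frame */
	BUILD_BUG_ON(EX_FRAME_SIZE(9000) != 9022);
}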
@@ -219,6 +222,7 @@ struct bnad_rx_info {
struct bnad_tx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
struct bnad_rx_unmap {
struct page *page;
- u32 page_offset;
struct sk_buff *skb;
struct bnad_rx_vector vector;
+ u32 page_offset;
};
enum bnad_rxbuf_type {
BNAD_RXBUF_NONE = 0,
- BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_SK_BUFF = 1,
BNAD_RXBUF_PAGE = 2,
- BNAD_RXBUF_MULTI = 3
+ BNAD_RXBUF_MULTI_BUFF = 3
};
-#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
struct bnad_rx_unmap_q {
int reuse_pi;
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0];
+ struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
};
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+ ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
#define BNAD_CF_PROMISC 0x02
#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+#define BNAD_CF_DEFAULT 0x08
+#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */
/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
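
The unmap queue ends in a flexible array of per-entry state, which is why bnad_setup_rx() earlier in this patch sizes each UNMAPQ as sizeof(struct bnad_rx_unmap_q) plus depth * sizeof(struct bnad_rx_unmap). A minimal sketch of allocating such a structure; the ex_* types are illustrative:

#include <linux/slab.h>
#include <linux/types.h>

struct ex_unmap {			/* stand-in for per-entry state */
	struct page	*page;
	u32		page_offset;
};

struct ex_unmap_q {
	u32		depth;
	struct ex_unmap	unmap[0];	/* trailing flexible array */
};

static struct ex_unmap_q *ex_unmap_q_alloc(u32 depth)
{
	struct ex_unmap_q *q;

	q = kzalloc(sizeof(*q) + depth * sizeof(struct ex_unmap), GFP_KERNEL);
	if (q)
		q->depth = depth;
	return q;
}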
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
* EXTERN VARIABLES
*/
extern const struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)