Diffstat (limited to 'drivers/net/ethernet/broadcom')
26 files changed, 2546 insertions, 976 deletions
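The bcm63xx_enet changes below stop treating the ENET DMA block as one flat register file: bcm_enet_shared_base becomes a three-entry array (global config, per-channel config, per-channel state RAM), and the new enet_dmac_*/enet_dmas_* helpers index a channel's registers through a platform-supplied dma_chan_width stride, which is what lets one driver serve both the ENET and ENETSW DMA engines. A minimal standalone sketch of that indexing scheme, for illustration only: the struct, names, and 0x10 stride are stand-ins, and the real helpers go through bcm_readl()/bcm_writel() plus the bcm63xx_enetdmacreg() offset mapping.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the mapped register block; the kernel driver uses
 * bcm_enet_shared_base[1] and MMIO accessors instead of an array. */
struct enet_dma_ctx {
	uint32_t *base;          /* start of the channel register window */
	unsigned int chan_width; /* stride between channels, in bytes */
};

/* Register `off` of channel `chan` lives at base + chan * chan_width + off,
 * mirroring enet_dmac_readl()/enet_dmac_writel() in the patch. */
static uint32_t dmac_readl(const struct enet_dma_ctx *ctx,
			   unsigned int off, int chan)
{
	return ctx->base[(chan * ctx->chan_width + off) / sizeof(uint32_t)];
}

static void dmac_writel(struct enet_dma_ctx *ctx, uint32_t val,
			unsigned int off, int chan)
{
	ctx->base[(chan * ctx->chan_width + off) / sizeof(uint32_t)] = val;
}

int main(void)
{
	uint32_t regs[32] = { 0 };                /* fake register file */
	struct enet_dma_ctx ctx = { regs, 0x10 }; /* hypothetical stride */

	dmac_writel(&ctx, 0x1, 0x4, 2);           /* reg 0x4 of channel 2 */
	printf("chan2 reg 0x4 = 0x%x (word index %d)\n",
	       (unsigned)dmac_readl(&ctx, 0x4, 2), (2 * 0x10 + 0x4) / 4);
	return 0;
}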
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 3e69b3f88099..1d680baf43d6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -22,7 +22,6 @@ config B44 tristate "Broadcom 440x/47xx ethernet support" depends on SSB_POSSIBLE && HAS_DMA select SSB - select NET_CORE select MII ---help--- If you have a network (Ethernet) controller of this type, say Y @@ -54,7 +53,6 @@ config B44_PCI config BCM63XX_ENET tristate "Broadcom 63xx internal mac support" depends on BCM63XX - select NET_CORE select MII select PHYLIB help diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 0b3e23ec37f7..b1bcd4ba4744 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); -/* io memory shared between all devices */ -static void __iomem *bcm_enet_shared_base; +/* io registers memory shared between all devices */ +static void __iomem *bcm_enet_shared_base[3]; /* * io helpers to access mac registers @@ -59,17 +59,76 @@ static inline void enet_writel(struct bcm_enet_priv *priv, } /* - * io helpers to access shared registers + * io helpers to access switch registers */ +static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enetsw_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readw(priv->base + off); +} + +static inline void enetsw_writew(struct bcm_enet_priv *priv, + u16 val, u32 off) +{ + bcm_writew(val, priv->base + off); +} + +static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readb(priv->base + off); +} + +static inline void enetsw_writeb(struct bcm_enet_priv *priv, + u8 val, u32 off) +{ + bcm_writeb(val, priv->base + off); +} + + +/* io helpers to access shared registers */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { - return bcm_readl(bcm_enet_shared_base + off); + return bcm_readl(bcm_enet_shared_base[0] + off); } static inline void enet_dma_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { - bcm_writel(val, bcm_enet_shared_base + off); + bcm_writel(val, bcm_enet_shared_base[0] + off); +} + +static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline void enet_dmac_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); +} + +static inline void enet_dmas_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } /* @@ -196,7 +255,6 @@ static int bcm_enet_refill_rx(struct net_device *dev) if (!skb) break; priv->rx_skb[desc_idx] = skb; - p = dma_map_single(&priv->pdev->dev, skb->data, priv->rx_skb_size, DMA_FROM_DEVICE); @@ -206,7 +264,7 @@ static int bcm_enet_refill_rx(struct 
net_device *dev) len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; @@ -217,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev) priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ - enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); } /* If rx ring is still empty, set a timer to try allocating @@ -293,13 +354,15 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ - if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != + (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { dev->stats.rx_dropped++; continue; } /* recycle packet if it's marked as bad */ - if (unlikely(len_stat & DMADESC_ERR_MASK)) { + if (!priv->enet_is_sw && + unlikely(len_stat & DMADESC_ERR_MASK)) { dev->stats.rx_errors++; if (len_stat & DMADESC_OVSIZE_MASK) @@ -353,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) bcm_enet_refill_rx(dev); /* kick rx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); } return processed; @@ -429,10 +492,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) dev = priv->net_dev; /* ack interrupts */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); @@ -451,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* restore rx/tx interrupt */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); return rx_work_done; } @@ -497,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) priv = netdev_priv(dev); /* mask rx/tx interrupts */ - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); napi_schedule(&priv->napi); @@ -530,6 +593,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_unlock; } + /* pad small packets sent on a switch device */ + if (priv->enet_is_sw && skb->len < 64) { + int needed = 64 - skb->len; + char *data; + + if (unlikely(skb_tailroom(skb) < needed)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); + if (!nskb) { + ret = NETDEV_TX_BUSY; + goto 
out_unlock; + } + dev_kfree_skb(skb); + skb = nskb; + } + data = skb_put(skb, needed); + memset(data, 0, needed); + } + /* point to the next available desc */ desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; priv->tx_skb[priv->tx_curr_desc] = skb; @@ -539,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; - len_stat |= DMADESC_ESOP_MASK | + len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == priv->tx_ring_size) { priv->tx_curr_desc = 0; - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); } priv->tx_desc_count--; @@ -557,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); /* kick tx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->tx_chan); /* stop queue if no more desc available */ if (!priv->tx_desc_count) @@ -686,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); + if (!priv->dma_has_sram) + return; + /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) @@ -833,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev) /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) @@ -909,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev) priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ - enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, - ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMAC_BUFALLOC, priv->rx_chan); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); @@ -919,37 +1009,55 @@ static int bcm_enet_open(struct net_device *dev) } /* write rx & tx ring addresses */ - enet_dma_writel(priv, priv->rx_desc_dma, - ENETDMA_RSTART_REG(priv->rx_chan)); - enet_dma_writel(priv, priv->tx_desc_dma, - ENETDMA_RSTART_REG(priv->tx_chan)); + if (priv->dma_has_sram) { + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, priv->rx_desc_dma, + ENETDMAC_RSTART, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_desc_dma, + ENETDMAC_RSTART, priv->tx_chan); + } /* clear remaining state ram for rx & tx channel */ - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); + if 
(priv->dma_has_sram) { + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); + } /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->rx_chan)); - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ - val = priv->rx_ring_size / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); - val = (priv->rx_ring_size * 2) / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + if (priv->dma_has_sram) { + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + } else { + enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); + } /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ @@ -958,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev) val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); if (priv->has_phy) phy_start(priv->phydev); @@ -1057,14 +1165,14 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; - enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); limit = 1000; do { u32 val; - val = 
enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); - if (!(val & ENETDMA_CHANCFG_EN_MASK)) + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); + if (!(val & ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); } while (limit--); @@ -1090,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev) /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); /* make sure no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); @@ -1328,6 +1436,20 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev, mutex_unlock(&priv->mib_update_lock); } +static int bcm_enet_nway_reset(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return genphy_restart_aneg(priv->phydev); + } + + return -EOPNOTSUPP; +} + static int bcm_enet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1470,6 +1592,7 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = { .get_strings = bcm_enet_get_strings, .get_sset_count = bcm_enet_get_sset_count, .get_ethtool_stats = bcm_enet_get_ethtool_stats, + .nway_reset = bcm_enet_nway_reset, .get_settings = bcm_enet_get_settings, .set_settings = bcm_enet_set_settings, .get_drvinfo = bcm_enet_get_drvinfo, @@ -1530,7 +1653,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) * it's appended */ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, - BCMENET_DMA_MAXBURST * 4); + priv->dma_maxburst * 4); return 0; } @@ -1621,7 +1744,7 @@ static int bcm_enet_probe(struct platform_device *pdev) /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) 
*/ - if (!bcm_enet_shared_base) + if (!bcm_enet_shared_base[0]) return -ENODEV; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1637,6 +1760,9 @@ static int bcm_enet_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(dev); + priv->enet_is_sw = false; + priv->dma_maxburst = BCMENET_DMA_MAXBURST; + ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; @@ -1687,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->pause_tx = pd->pause_tx; priv->force_duplex_full = pd->force_duplex_full; priv->force_speed_100 = pd->force_speed_100; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_desc_shift = pd->dma_desc_shift; } if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { @@ -1847,7 +1978,6 @@ static int bcm_enet_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } @@ -1862,19 +1992,881 @@ struct platform_driver bcm63xx_enet_driver = { }; /* - * reserve & remap memory space shared between all macs + * switch mii access callbacks */ -static int bcm_enet_shared_probe(struct platform_device *pdev) +static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, + int ext, int phy_id, int location) { - struct resource *res; + u32 reg; + int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_RD_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + ret = enetsw_readw(priv, ENETSW_MDIOD_REG); + spin_unlock_bh(&priv->enetsw_mdio_lock); + return ret; +} + +static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, + int ext, int phy_id, int location, + uint16_t data) +{ + u32 reg; + + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_WR_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + reg |= data; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + spin_unlock_bh(&priv->enetsw_mdio_lock); +} + +static inline int bcm_enet_port_is_rgmii(int portid) +{ + return portid >= ENETSW_RGMII_PORT0; +} + +/* + * enet sw PHY polling + */ +static void swphy_poll_timer(unsigned long data) +{ + struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; + unsigned int i; + + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int external_phy = bcm_enet_port_is_rgmii(i); + u8 override; + + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (port->bypass_link) + continue; + + /* dummy read to clear */ + for (j = 0; j < 2; j++) + val = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_BMSR); + + if (val == 0xffff) + continue; + + up = (val & BMSR_LSTATUS) ? 
1 : 0; + if (!(up ^ priv->sw_port_link[i])) + continue; + + priv->sw_port_link[i] = up; + + /* link changed */ + if (!up) { + dev_info(&priv->pdev->dev, "link DOWN on %s\n", + port->name); + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + continue; + } + + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_ADVERTISE); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_LPA); + + lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) + speed = 1000; + else { + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + } + + dev_info(&priv->pdev->dev, + "link UP on %s, %dMbps, %s-duplex\n", + port->name, speed, duplex ? "full" : "half"); + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + if (speed == 1000) + override |= ENETSW_IMPOV_1000_MASK; + else if (speed == 100) + override |= ENETSW_IMPOV_100_MASK; + if (duplex) + override |= ENETSW_IMPOV_FDX_MASK; + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + priv->swphy_poll.expires = jiffies + HZ; + add_timer(&priv->swphy_poll); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enetsw_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i, ret; + unsigned int size; + void *p; + u32 val; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + /* mask all interrupts and request them */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq; + + if (priv->irq_tx != -1) { + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq_rx; + } + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out_freeirq_tx; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = 
-ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* disable all ports */ + for (i = 0; i < priv->num_ports; i++) { + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + + priv->sw_port_link[i] = 0; + } + + /* reset mib */ + val = enetsw_readb(priv, ENETSW_GMCR_REG); + val |= ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + val &= ~ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + + /* force CPU port state */ + val = enetsw_readb(priv, ENETSW_IMPOV_REG); + val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; + enetsw_writeb(priv, val, ENETSW_IMPOV_REG); + + /* enable switch forward engine */ + val = enetsw_readb(priv, ENETSW_SWMODE_REG); + val |= ENETSW_SWMODE_FWD_EN_MASK; + enetsw_writeb(priv, val, ENETSW_SWMODE_REG); + + /* enable jumbo on all ports */ + enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); + enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + + /* clear remaining state ram for rx & tx channel */ + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + + /* set dma maximum burst len */ + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel + */ + wmb(); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG, priv->rx_chan); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->tx_chan); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->tx_chan); + + netif_carrier_on(dev); + netif_start_queue(dev); + + /* apply override config for bypass_link ports here. 
*/ + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + u8 override; + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (!port->bypass_link) + continue; + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + switch (port->force_speed) { + case 1000: + override |= ENETSW_IMPOV_1000_MASK; + break; + case 100: + override |= ENETSW_IMPOV_100_MASK; + break; + case 10: + break; + default: + pr_warn("invalid forced speed on port %s: assume 10\n", + port->name); + break; + } + + if (port->force_duplex_full) + override |= ENETSW_IMPOV_FDX_MASK; + + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + /* start phy polling timer */ + init_timer(&priv->swphy_poll); + priv->swphy_poll.function = swphy_poll_timer; + priv->swphy_poll.data = (unsigned long)priv; + priv->swphy_poll.expires = jiffies; + add_timer(&priv->swphy_poll); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + return ret; +} + +/* stop callback */ +static int bcm_enetsw_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + del_timer_sync(&priv->swphy_poll); + netif_stop_queue(dev); + napi_disable(&priv->napi); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + + return 0; +} + +/* try to sort out phy external status by walking the used_port field + * in the bcm_enet_priv structure. in case the phy address is not + * assigned to any physical port on the switch, assume it is external + * (and yell at the user). 
+ */ +static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) +{ + int i; + + for (i = 0; i < priv->num_ports; ++i) { + if (!priv->used_ports[i].used) + continue; + if (priv->used_ports[i].phy_id == phy_id) + return bcm_enet_port_is_rgmii(i); + } + + printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", + phy_id); + return 1; +} + +/* can't use bcmenet_sw_mdio_read directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, + int location) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + return bcmenet_sw_mdio_read(priv, + bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location); +} + +/* can't use bcmenet_sw_mdio_write directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, + int location, + int val) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location, val); +} + +static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enetsw_mii_mdio_read; + mii.mdio_write = bcm_enetsw_mii_mdio_write; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + +} + +static const struct net_device_ops bcm_enetsw_ops = { + .ndo_open = bcm_enetsw_open, + .ndo_stop = bcm_enetsw_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_change_mtu = bcm_enet_change_mtu, + .ndo_do_ioctl = bcm_enetsw_ioctl, +}; + + +static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, + { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, + { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, + { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), + ETHSW_MIB_RX_1024_1522 }, + { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), + ETHSW_MIB_RX_1523_2047 }, + { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), + ETHSW_MIB_RX_2048_4095 }, + { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), + ETHSW_MIB_RX_4096_8191 }, + { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), + ETHSW_MIB_RX_8192_9728 }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, + { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, + { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, + { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, + { "tx_pause", GEN_STAT(mib.tx_pause), 
ETHSW_MIB_RX_PAUSE }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, + { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, + +}; + +#define BCM_ENETSW_STATS_LEN \ + (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static void bcm_enetsw_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enetsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int bcm_enetsw_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENETSW_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enetsw_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENETSW_STATS_LEN; +} + +static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 lo, hi; + char *p; + int reg; + + s = &bcm_enetsw_gstrings_stats[i]; + + reg = s->mib_reg; + if (reg == -1) + continue; + + lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) { + hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); + *(u64 *)p = ((u64)hi << 32 | lo); + } else { + *(u32 *)p = lo; + } + } + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enetsw_gstrings_stats[i]; + + if (s->mib_reg == -1) + p = (char *)&netdev->stats + s->stat_offset; + else + p = (char *)priv + s->stat_offset; + + data[i] = (s->sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +static void bcm_enetsw_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enetsw_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enetsw_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enetsw_open(dev); + if (err) + dev_close(dev); + } + return 0; +} + +static struct ethtool_ops bcm_enetsw_ethtool_ops = { + .get_strings = bcm_enetsw_get_strings, + .get_sset_count = bcm_enetsw_get_sset_count, + .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, + .get_drvinfo = bcm_enetsw_get_drvinfo, + .get_ringparam = bcm_enetsw_get_ringparam, + .set_ringparam = bcm_enetsw_set_ringparam, +}; + +/* allocate netdevice, request register memory and register device. */ +static int bcm_enetsw_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enetsw_platform_data *pd; + struct resource *res_mem; + int ret, irq_rx, irq_tx; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) + */ + if (!bcm_enet_shared_base[0]) + return -ENODEV; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_rx = platform_get_irq(pdev, 0); + irq_tx = platform_get_irq(pdev, 1); + if (!res_mem || irq_rx < 0) return -ENODEV; - bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); - if (!bcm_enet_shared_base) + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) return -ENOMEM; + priv = netdev_priv(dev); + memset(priv, 0, sizeof(*priv)); + + /* initialize default and fetch platform data */ + priv->enet_is_sw = true; + priv->irq_rx = irq_rx; + priv->irq_tx = irq_tx; + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; + + pd = pdev->dev.platform_data; + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + memcpy(priv->used_ports, pd->used_ports, + sizeof(pd->used_ports)); + priv->num_ports = pd->num_ports; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + } + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + if (!request_mem_region(res_mem->start, resource_size(res_mem), + "bcm63xx_enetsw")) { + ret = -EBUSY; + goto out; + } + + priv->base = ioremap(res_mem->start, resource_size(res_mem)); + if (priv->base == NULL) { + ret = -ENOMEM; + goto out_release_mem; + } + + priv->mac_clk = clk_get(&pdev->dev, "enetsw"); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out_unmap; + } + clk_enable(priv->mac_clk); + + priv->rx_chan = 0; + priv->tx_chan = 1; + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = 
(unsigned long)dev; + + /* register netdevice */ + dev->netdev_ops = &bcm_enetsw_ops; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&priv->enetsw_mdio_lock); + + ret = register_netdev(dev); + if (ret) + goto out_put_clk; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_put_clk: + clk_put(priv->mac_clk); + +out_unmap: + iounmap(priv->base); + +out_release_mem: + release_mem_region(res_mem->start, resource_size(res_mem)); +out: + free_netdev(dev); + return ret; +} + + +/* exit func, stops hardware and unregisters netdevice */ +static int bcm_enetsw_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enetsw_driver = { + .probe = bcm_enetsw_probe, + .remove = bcm_enetsw_remove, + .driver = { + .name = "bcm63xx_enetsw", + .owner = THIS_MODULE, + }, +}; + +/* reserve & remap memory space shared between all macs */ +static int bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + void __iomem *p[3]; + unsigned int i; + + memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); + + for (i = 0; i < 3; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + p[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(p[i])) + return PTR_ERR(p[i]); + } + + memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); return 0; } @@ -1884,8 +2876,7 @@ static int bcm_enet_shared_remove(struct platform_device *pdev) return 0; } -/* - * this "shared" driver is needed because both macs share a single +/* this "shared" driver is needed because both macs share a single * address space */ struct platform_driver bcm63xx_enet_shared_driver = { @@ -1897,9 +2888,7 @@ struct platform_driver bcm63xx_enet_shared_driver = { }, }; -/* - * entry point - */ +/* entry point */ static int __init bcm_enet_init(void) { int ret; @@ -1912,12 +2901,19 @@ static int __init bcm_enet_init(void) if (ret) platform_driver_unregister(&bcm63xx_enet_shared_driver); + ret = platform_driver_register(&bcm63xx_enetsw_driver); + if (ret) { + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); + } + return ret; } static void __exit bcm_enet_exit(void) { platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enetsw_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 133d5857b9e2..f55af4310085 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -18,6 +18,7 @@ /* maximum burst len for dma (4 bytes unit) */ #define BCMENET_DMA_MAXBURST 16 +#define BCMENETSW_DMA_MAXBURST 8 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value * must be low enough so that a DMA transfer of above burst length can @@ -84,11 +85,60 @@ #define ETH_MIB_RX_CNTRL 54 +/* + * SW MIB Counters register 
definitions +*/ +#define ETHSW_MIB_TX_ALL_OCT 0 +#define ETHSW_MIB_TX_DROP_PKTS 2 +#define ETHSW_MIB_TX_QOS_PKTS 3 +#define ETHSW_MIB_TX_BRDCAST 4 +#define ETHSW_MIB_TX_MULT 5 +#define ETHSW_MIB_TX_UNI 6 +#define ETHSW_MIB_TX_COL 7 +#define ETHSW_MIB_TX_1_COL 8 +#define ETHSW_MIB_TX_M_COL 9 +#define ETHSW_MIB_TX_DEF 10 +#define ETHSW_MIB_TX_LATE 11 +#define ETHSW_MIB_TX_EX_COL 12 +#define ETHSW_MIB_TX_PAUSE 14 +#define ETHSW_MIB_TX_QOS_OCT 15 + +#define ETHSW_MIB_RX_ALL_OCT 17 +#define ETHSW_MIB_RX_UND 19 +#define ETHSW_MIB_RX_PAUSE 20 +#define ETHSW_MIB_RX_64 21 +#define ETHSW_MIB_RX_65_127 22 +#define ETHSW_MIB_RX_128_255 23 +#define ETHSW_MIB_RX_256_511 24 +#define ETHSW_MIB_RX_512_1023 25 +#define ETHSW_MIB_RX_1024_1522 26 +#define ETHSW_MIB_RX_OVR 27 +#define ETHSW_MIB_RX_JAB 28 +#define ETHSW_MIB_RX_ALIGN 29 +#define ETHSW_MIB_RX_CRC 30 +#define ETHSW_MIB_RX_GD_OCT 31 +#define ETHSW_MIB_RX_DROP 33 +#define ETHSW_MIB_RX_UNI 34 +#define ETHSW_MIB_RX_MULT 35 +#define ETHSW_MIB_RX_BRDCAST 36 +#define ETHSW_MIB_RX_SA_CHANGE 37 +#define ETHSW_MIB_RX_FRAG 38 +#define ETHSW_MIB_RX_OVR_DISC 39 +#define ETHSW_MIB_RX_SYM 40 +#define ETHSW_MIB_RX_QOS_PKTS 41 +#define ETHSW_MIB_RX_QOS_OCT 42 +#define ETHSW_MIB_RX_1523_2047 44 +#define ETHSW_MIB_RX_2048_4095 45 +#define ETHSW_MIB_RX_4096_8191 46 +#define ETHSW_MIB_RX_8192_9728 47 + + struct bcm_enet_mib_counters { u64 tx_gd_octets; u32 tx_gd_pkts; u32 tx_all_octets; u32 tx_all_pkts; + u32 tx_unicast; u32 tx_brdcast; u32 tx_mult; u32 tx_64; @@ -97,7 +147,12 @@ struct bcm_enet_mib_counters { u32 tx_256_511; u32 tx_512_1023; u32 tx_1024_max; + u32 tx_1523_2047; + u32 tx_2048_4095; + u32 tx_4096_8191; + u32 tx_8192_9728; u32 tx_jab; + u32 tx_drop; u32 tx_ovr; u32 tx_frag; u32 tx_underrun; @@ -114,6 +169,7 @@ struct bcm_enet_mib_counters { u32 rx_all_octets; u32 rx_all_pkts; u32 rx_brdcast; + u32 rx_unicast; u32 rx_mult; u32 rx_64; u32 rx_65_127; @@ -197,6 +253,9 @@ struct bcm_enet_priv { /* number of dma desc in tx ring */ int tx_ring_size; + /* maximum dma burst size */ + int dma_maxburst; + /* cpu view of rx dma ring */ struct bcm_enet_desc *tx_desc_cpu; @@ -269,6 +328,33 @@ struct bcm_enet_priv { /* maximum hardware transmit/receive size */ unsigned int hw_mtu; + + bool enet_is_sw; + + /* port mapping for switch devices */ + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + int sw_port_link[ENETSW_MAX_PORT]; + + /* used to poll switch port state */ + struct timer_list swphy_poll; + spinlock_t enetsw_mdio_lock; + + /* dma channel enable mask */ + u32 dma_chan_en_mask; + + /* dma channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* dma channel width */ + unsigned int dma_chan_width; + + /* dma descriptor shift value */ + unsigned int dma_desc_shift; }; + #endif /* ! 
BCM63XX_ENET_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 5d204492c603..6a2de1d79ff6 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8104,7 +8104,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) pci_set_master(pdev); - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&pdev->dev, "Cannot find power management capability, aborting\n"); @@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = { .err_handler = &bnx2_err_handler, }; -static int __init bnx2_init(void) -{ - return pci_register_driver(&bnx2_pci_driver); -} - -static void __exit bnx2_cleanup(void) -{ - pci_unregister_driver(&bnx2_pci_driver); -} - -module_init(bnx2_init); -module_exit(bnx2_cleanup); - - - +module_pci_driver(bnx2_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3dba2a70a00e..dedbd76c033e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -34,12 +34,10 @@ #define BCM_DCBNL #endif - #include "bnx2x_hsi.h" #include "../cnic_if.h" - #define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) #include <linux/mdio.h> @@ -114,7 +112,6 @@ do { \ #define BNX2X_ERROR(fmt, ...) \ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) - /* before we have a dev->name use dev_info() */ #define BNX2X_DEV_INFO(fmt, ...) \ do { \ @@ -147,7 +144,6 @@ do { \ #define U64_HI(x) ((u32)(((u64)(x)) >> 32)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) - #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) @@ -366,7 +362,7 @@ union db_prod { /* * Number of required SGEs is the sum of two: * 1. Number of possible opened aggregations (next packet for - * these aggregations will probably consume SGE immidiatelly) + * these aggregations will probably consume SGE immediately) * 2. 
Rest of BRB blocks divided by 2 (block will consume new SGE only * after placement on BD for new TPA aggregation) * @@ -387,7 +383,6 @@ union db_prod { #define BIT_VEC64_ELEM_SHIFT 6 #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) - #define __BIT_VEC64_SET_BIT(el, bit) \ do { \ el = ((el) | ((u64)0x1 << (bit))); \ @@ -398,7 +393,6 @@ union db_prod { el = ((el) & (~((u64)0x1 << (bit)))); \ } while (0) - #define BIT_VEC64_SET_BIT(vec64, idx) \ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ (idx) & BIT_VEC64_ELEM_MASK) @@ -419,8 +413,6 @@ union db_prod { /*******************************************************/ - - /* Number of u64 elements in SGE mask array */ #define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) @@ -493,11 +485,26 @@ struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ struct napi_struct napi; + +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define BNX2X_FP_STATE_IDLE 0 +#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ +#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) +#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) + /* protect state */ + spinlock_t lock; +#endif /* CONFIG_NET_LL_RX_POLL */ + union host_hc_status_block status_blk; - /* chip independed shortcuts into sb structure */ + /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; __le16 *sb_running_index; - /* chip independed shortcut into rx_prods_offset memory */ + /* chip independent shortcut into rx_prods_offset memory */ u32 ustorm_rx_prods_offset; u32 rx_buf_size; @@ -565,6 +572,116 @@ struct bnx2x_fastpath { #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) +#ifdef CONFIG_NET_LL_RX_POLL +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ + spin_lock_init(&fp->lock); + fp->state = BNX2X_FP_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a FP */ +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock(&fp->lock); + if (fp->state & BNX2X_FP_LOCKED) { + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + fp->state |= BNX2X_FP_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + fp->state = BNX2X_FP_STATE_NAPI; + } + spin_unlock(&fp->lock); + return rc; +} + +/* returns true is someone tried to get the FP while napi had it */ +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock(&fp->lock); + WARN_ON(fp->state & + (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock(&fp->lock); + return rc; +} + +/* called from bnx2x_low_latency_poll() */ +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock_bh(&fp->lock); + if ((fp->state & BNX2X_FP_LOCKED)) { + fp->state |= BNX2X_FP_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + fp->state |= BNX2X_FP_STATE_POLL; + } + spin_unlock_bh(&fp->lock); + return rc; +} + +/* returns true if someone tried to get 
the FP while it was locked */ +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock_bh(&fp->lock); + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock_bh(&fp->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + return fp->state & BNX2X_FP_USER_PEND; +} +#else +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + return true; +} + +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 @@ -580,12 +697,10 @@ struct bnx2x_fastpath { txdata_ptr[FIRST_TX_COS_INDEX] \ ->var) - #define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) - /* MC hsi */ #define MAX_FETCH_BD 13 /* HW max BDs per packet */ #define RX_COPY_THRESH 92 @@ -613,7 +728,7 @@ struct bnx2x_fastpath { * START_BD(splitted) - includes unpaged data segment for GSO * PARSING_BD - for TSO and CSUM data * PARSING_BD2 - for encapsulation data - * Frag BDs - decribes pages for frags + * Frag BDs - describes pages for frags */ #define BDS_PER_TX_PKT 4 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) @@ -693,12 +808,10 @@ struct bnx2x_fastpath { FW_DROP_LEVEL(bp)) #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) - /* This is needed for determining of last_max */ #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) - #define BNX2X_SWCID_SHIFT 17 #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) @@ -723,7 +836,6 @@ struct bnx2x_fastpath { DPM_TRIGER_TYPE); \ } while (0) - /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ skb->csum_offset) @@ -766,7 +878,6 @@ struct bnx2x_fastpath { #define BNX2X_RX_SUM_FIX(cqe) \ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) - #define FP_USB_FUNC_OFF \ offsetof(struct cstorm_status_block_u, func) #define FP_CSB_FUNC_OFF \ @@ -900,14 +1011,14 @@ struct bnx2x_common { #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ (CHIP_REV(bp) == CHIP_REV_Ax)) /* This define is used in two main places: - * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher + * 1. In the early stages of nic_load, to know if to configure Parser / Searcher * to nic-only mode or to offload mode. Offload mode is configured if either the * chip is E1x (where MIC_MODE register is not applicable), or if cnic already * registered for this port (which means that the user wants storage services). * 2. During cnic-related load, to know if offload mode is already configured in - * the HW or needs to be configrued. + * the HW or needs to be configured. 
* Since the transition from nic-mode to offload-mode in HW causes traffic - * coruption, nic-mode is configured only in ports on which storage services + * corruption, nic-mode is configured only in ports on which storage services * where never requested. */ #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) @@ -1008,14 +1119,14 @@ extern struct workqueue_struct *bnx2x_wq; * If the maximum number of FP-SB available is X then: * a. If CNIC is supported it consumes 1 FP-SB thus the max number of * regular L2 queues is Y=X-1 - * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) * c. If the FCoE L2 queue is supported the actual number of L2 queues * is Y+1 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for * slow-path interrupts) or Y+2 if CNIC is supported (one additional * FP interrupt context for the CNIC). * e. The number of HW context (CID count) is always X or X+1 if FCoE - * L2 queue is supported. the cid for the FCoE L2 queue is always X. + * L2 queue is supported. The cid for the FCoE L2 queue is always X. */ /* fast-path interrupt contexts E1x */ @@ -1068,7 +1179,6 @@ struct bnx2x_slowpath { struct eth_classify_rules_ramrod_data e2; } mac_rdata; - union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; @@ -1119,7 +1229,6 @@ struct bnx2x_slowpath { #define bnx2x_sp_mapping(bp, var) \ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) - /* attn group wiring */ #define MAX_DYNAMIC_ATTN_GRPS 8 @@ -1221,11 +1330,11 @@ enum { BNX2X_SP_RTNL_AFEX_F_UPDATE, BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; - struct bnx2x_prev_path_list { struct list_head list; u8 bus; @@ -1392,6 +1501,7 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) +#define INTERRUPTS_ENABLED_FLAG (1 << 23) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1585,7 +1695,7 @@ struct bnx2x { struct mutex cnic_mutex; struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; - /* Start index of the "special" (CNIC related) L2 cleints */ + /* Start index of the "special" (CNIC related) L2 clients */ u8 cnic_base_cl_id; int dmae_ready; @@ -1699,7 +1809,7 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; - /* DCBX Negotation results */ + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -1755,7 +1865,6 @@ extern int num_queues; #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ @@ -1853,9 +1962,6 @@ struct bnx2x_func_init_params { #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) - - - /** * bnx2x_set_mac_one - configure a single MAC address * @@ -1921,7 +2027,6 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type); int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); /* FLR related routines */ u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); @@ -1937,6 +2042,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, void bnx2x_update_coalesce(struct 
bnx2x *bp); int bnx2x_get_cur_phy_idx(struct bnx2x *bp); +bool bnx2x_port_after_undi(struct bnx2x *bp); + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) { @@ -1998,7 +2105,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 - /* DMAE command defines */ #define DMAE_TIMEOUT -1 #define DMAE_PCI_ERROR -2 /* E2 and onward */ @@ -2062,7 +2168,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit - indicates eror */ + * indicates error + */ #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ @@ -2100,7 +2207,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) - #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 @@ -2137,6 +2243,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ + IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ @@ -2338,4 +2446,9 @@ enum { #define NUM_MACS 8 +enum bnx2x_pci_bus_speed { + BNX2X_PCI_LINK_SPEED_2500 = 2500, + BNX2X_PCI_LINK_SPEED_5000 = 5000, + BNX2X_PCI_LINK_SPEED_8000 = 8000 +}; #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 638e55435b04..ec3aa1d451e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -24,6 +24,7 @@ #include <net/tcp.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> +#include <net/ll_poll.h> #include <linux/prefetch.h> #include "bnx2x_cmn.h" #include "bnx2x_init.h" @@ -124,7 +125,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); /* Queue pointer cannot be re-set on an fp-basis, as moving pointer - * backward along the array could cause memory to be overriden + * backward along the array could cause memory to be overridden */ for (cos = 1; cos < bp->max_cos; cos++) { for (i = 0; i < old_eth_num - delta; i++) { @@ -165,7 +166,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { @@ -259,7 +259,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) smp_mb(); if (unlikely(netif_tx_queue_stopped(txq))) { - /* Taking tx_lock() is needed to prevent reenabling the queue + /* Taking tx_lock() is needed to prevent re-enabling the queue * while it's empty. 
This could have happen if rx_action() gets * suspended in bnx2x_tx_int() after the condition before * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): @@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - /* Unmap the page as we r going to pass it to the stack */ + /* Unmap the page as we're going to pass it to the stack */ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(&old_rx_pg, mapping), SGE_PAGES, DMA_FROM_DEVICE); @@ -733,7 +733,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, dev_kfree_skb_any(skb); } - /* put new data in bin */ rx_buf->data = new_data; @@ -805,40 +804,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { struct bnx2x *bp = fp->bp; u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; - u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + u16 sw_comp_cons, sw_comp_prod; int rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return 0; #endif - /* CQ "next element" is of the size of the regular element, - that's why it's ok here */ - hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); - if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - hw_comp_cons++; - bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_comp_cons = fp->rx_comp_cons; sw_comp_prod = fp->rx_comp_prod; - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; DP(NETIF_MSG_RX_STATUS, - "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", - fp->index, hw_comp_cons, sw_comp_cons); + "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); - while (sw_comp_cons != hw_comp_cons) { + while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) { struct sw_rx_bd *rx_buf = NULL; struct sk_buff *skb; - union eth_rx_cqe *cqe; - struct eth_fast_path_rx_cqe *cqe_fp; u8 cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; u16 len, pad, queue; @@ -850,12 +841,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) return 0; #endif - comp_ring_cons = RCQ_BD(sw_comp_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); - cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; @@ -899,7 +887,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) cqe_fp); goto next_rx; - } queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->tpa_info[queue]; @@ -1002,9 +989,13 @@ reuse_rx: PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - napi_gro_receive(&fp->napi, skb); + skb_mark_ll(skb, &fp->napi); + if (bnx2x_fp_ll_polling(fp)) + netif_receive_skb(skb); + else + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -1016,8 +1007,15 @@ next_cqe: sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + /* mark CQE as free */ + BNX2X_SEED_CQE(cqe_fp); + if (rx_pkt == budget) break; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; } /* while */ fp->rx_bd_cons = bd_cons; @@ -1053,8 +1051,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) #endif /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - for_each_cos_in_tx_queue(fp, cos) 
prefetch(fp->txdata_ptr[cos]->tx_cons_sb); @@ -1118,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp, memset(data, 0, sizeof(*data)); - /* Fill the report data: efective line speed */ + /* Fill the report data: effective line speed */ data->line_speed = line_speed; /* Link is down */ @@ -1161,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp) * * @bp: driver handle * - * None atomic inmlementation. + * None atomic implementation. * Should be called under the phy_lock. */ void __bnx2x_link_report(struct bnx2x *bp) @@ -1304,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); if (!fp->disable_tpa) { - /* Fill the per-aggregtion pool */ + /* Fill the per-aggregation pool */ for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; @@ -1726,7 +1722,7 @@ static int bnx2x_req_irq(struct bnx2x *bp) return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); } -int bnx2x_setup_irqs(struct bnx2x *bp) +static int bnx2x_setup_irqs(struct bnx2x *bp) { int rc = 0; if (bp->flags & USING_MSIX_FLAG && @@ -1759,32 +1755,46 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + local_bh_disable(); + for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + local_bh_disable(); + for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -1829,7 +1839,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) } /* select a non-FCoE queue */ - return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); + return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) @@ -1862,7 +1872,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * * If the actual number of Tx queues (for each CoS) is less than 16 then there * will be the holes at the end of each group of 16 ETh L2 indices (0..15, - * 16..31,...) with indicies that are not coupled with any real Tx queue. + * 16..31,...) with indices that are not coupled with any real Tx queue. * * The proper configuration of skb->queue_mapping is handled by * bnx2x_select_queue() and __skb_tx_hash(). 
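The CONFIG_NET_LL_RX_POLL helpers added at the top of this patch implement a small ownership state machine between the NAPI softirq path and busy-polling sockets: whichever context fails to take the ring records that it yielded, and the owner checks for that on unlock. Below is a minimal userspace model of that state machine — the state names and bit values are illustrative, and a pthread mutex stands in for spin_lock_bh(&fp->lock); it is a sketch of the pattern, not the driver's exact code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum fp_state {
	FP_IDLE       = 0,
	FP_NAPI       = 1 << 0, /* NAPI (softirq) owns the ring */
	FP_POLL       = 1 << 1, /* a busy-polling socket owns the ring */
	FP_NAPI_YIELD = 1 << 2, /* NAPI backed off while poll held it */
	FP_POLL_YIELD = 1 << 3, /* poll backed off while NAPI held it */
};
#define FP_LOCKED (FP_NAPI | FP_POLL)

struct fastpath_model {
	pthread_mutex_t lock; /* stands in for spin_lock_bh(&fp->lock) */
	unsigned int state;
};

/* NAPI poll entry: take the ring, or record that we yielded */
static bool fp_lock_napi(struct fastpath_model *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & FP_LOCKED) {
		fp->state |= FP_NAPI_YIELD;
		rc = false;
	} else {
		fp->state = FP_NAPI;
	}
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

/* busy-poll entry: same idea, entered from socket/user context */
static bool fp_lock_poll(struct fastpath_model *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & FP_LOCKED) {
		fp->state |= FP_POLL_YIELD;
		rc = false;
	} else {
		fp->state = FP_POLL;
	}
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

/* returns true if a poller yielded while NAPI held the ring, i.e.
 * there is pending work and the caller must not declare "done" */
static bool fp_unlock_napi(struct fastpath_model *fp)
{
	bool rc;

	pthread_mutex_lock(&fp->lock);
	rc = fp->state & FP_POLL_YIELD;
	fp->state = FP_IDLE;
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

int main(void)
{
	struct fastpath_model fp = { PTHREAD_MUTEX_INITIALIZER, FP_IDLE };

	printf("napi locks: %d\n", fp_lock_napi(&fp));  /* 1: got it */
	printf("poll locks: %d\n", fp_lock_poll(&fp));  /* 0: yields */
	printf("poller yielded meanwhile: %d\n", fp_unlock_napi(&fp));
	return 0;
}

This is why the bnx2x_poll() hunk later in this patch only falls out of the NAPI loop when bnx2x_fp_unlock_napi() returns false: a poller that yielded means the ring still has an interested consumer and NAPI must not complete.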
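Similarly, the bnx2x_select_queue() hunk above now folds a generic queue pick into the ethernet-only range with a modulo, rather than passing a reduced queue count into the hash itself. A hypothetical sketch of that rule follows — the hash mixing and the queue count are invented for illustration, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define NUM_ETH_QUEUES 8 /* invented; the driver reads this from bp */

/* stand-in for the flow hash __netdev_pick_tx() derives from the skb;
 * the mixing here is arbitrary, not the kernel's hash function */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr, uint16_t dport)
{
	uint32_t h = saddr ^ daddr ^ dport;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

static uint16_t select_queue(uint32_t pick)
{
	/* fold whatever was picked into the L2-only range, so the
	 * FCoE queue sitting past that range can never be chosen
	 * for ordinary traffic */
	return pick % NUM_ETH_QUEUES;
}

int main(void)
{
	uint32_t pick = flow_hash(0x0a000001, 0x0a000002, 443);

	printf("flow -> txq %u\n", (unsigned)select_queue(pick));
	return 0;
}

The modulo keeps the chosen index below BNX2X_NUM_ETH_QUEUES regardless of how many extra queues (FCoE, CNIC) exist past that range.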
@@ -1924,7 +1934,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; - /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ + /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; else @@ -1937,7 +1947,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - /* Prepare the initial contents fo the indirection table if RSS is + /* Prepare the initial contents for the indirection table if RSS is * enabled */ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) @@ -2015,7 +2025,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) /* * Cleans the object that have internal lists without sending - * ramrods. Should be run when interrutps are disabled. + * ramrods. Should be run when interrupts are disabled. */ void bnx2x_squeeze_objects(struct bnx2x *bp) { @@ -2166,10 +2176,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) bp->fw_stats_data_mapping = bp->fw_stats_mapping + bp->fw_stats_req_sz; - DP(BNX2X_MSG_SP, "statistics request base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", U64_HI(bp->fw_stats_req_mapping), U64_LO(bp->fw_stats_req_mapping)); - DP(BNX2X_MSG_SP, "statistics data base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", U64_HI(bp->fw_stats_data_mapping), U64_LO(bp->fw_stats_data_mapping)); return 0; @@ -2183,6 +2193,8 @@ alloc_mem_err: /* send load request to mcp and analyze response */ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) { + u32 param; + /* init fw_seq */ bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & @@ -2195,9 +2207,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) DRV_PULSE_SEQ_MASK); BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; + + if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) + param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; + /* load request */ - (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, - DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); /* if mcp fails to respond we must abort */ if (!(*load_code)) { @@ -2238,7 +2254,7 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code) /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n", + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", loaded_fw, my_fw); return -EBUSY; } @@ -2316,10 +2332,10 @@ static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - int cos; struct napi_struct orig_napi = fp->napi; struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; + /* bzero bnx2x_fastpath contents */ if (fp->tpa_info) memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * @@ -2345,8 +2361,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * BNX2X_NUM_ETH_QUEUES(bp) + index]; - /* - * set the tpa flag for each queue. The tpa flag determines the queue + /* set the tpa flag for each queue. 
The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation */ fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || @@ -2429,7 +2444,6 @@ int bnx2x_load_cnic(struct bnx2x *bp) if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); - DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); return 0; @@ -2472,6 +2486,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + /* zero the structure w/o any lock, before SP handler is initialized */ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, &bp->last_reported_link.link_report_flags); @@ -2536,8 +2551,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* configure multi cos mappings in kernel. - * this configuration may be overriden by a multi class queue discipline - * or by a dcbx negotiation result. + * this configuration may be overridden by a multi class queue + * discipline or by a dcbx negotiation result. */ bnx2x_setup_tc(bp->dev, bp->max_cos); @@ -2696,7 +2711,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start the Tx */ switch (load_mode) { case LOAD_NORMAL: - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); break; @@ -2841,7 +2856,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* Nothing to do during unload if previous bnx2x_nic_load() - * have not completed succesfully - all resourses are released. + * have not completed successfully - all resources are released. * * we can get here only after unsuccessful ndo_* callback, during which * dev->IFF_UP flag is still on. @@ -2856,6 +2871,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; smp_mb(); + /* indicate to VFs that the PF is going down */ + bnx2x_iov_channel_down(bp); + if (CNIC_LOADED(bp)) bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); @@ -2890,10 +2908,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); - /* - * Prevent transactions to host from the functions on the + /* Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global - * attention once gloabl blocks are reset and gates are opened + * attention once global blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ @@ -2914,7 +2931,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* - * At this stage no more interrupts will arrive so we may safly clean + * At this stage no more interrupts will arrive so we may safely clean * the queueable objects here in case they failed to get cleaned so far. */ if (IS_PF(bp)) @@ -2955,7 +2972,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_set_reset_global(bp); } - /* The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. 
*/ @@ -3040,6 +3056,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return 0; } #endif + if (!bnx2x_fp_lock_napi(fp)) + return work_done; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) @@ -3049,12 +3067,15 @@ int bnx2x_poll(struct napi_struct *napi, int budget) work_done += bnx2x_rx_int(fp, budget - work_done); /* must not complete if we consumed full budget */ - if (work_done >= budget) + if (work_done >= budget) { + bnx2x_fp_unlock_napi(fp); break; + } } /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + if (!bnx2x_fp_unlock_napi(fp) && + !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* No need to update SB for FCoE L2 ring as long as * it's connected to the default SB and the SB @@ -3096,6 +3117,32 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return work_done; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +int bnx2x_low_latency_recv(struct napi_struct *napi) +{ + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + int found = 0; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR) || + (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + return LL_FLUSH_FAILED; + + if (!bnx2x_fp_lock_poll(fp)) + return LL_FLUSH_BUSY; + + if (bnx2x_has_rx_work(fp)) + found = bnx2x_rx_int(fp, 4); + + bnx2x_fp_unlock_poll(fp); + + return found; +} +#endif + /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs @@ -3496,9 +3543,12 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, /* outer IP header info */ if (xmit_type & XMIT_CSUM_V4) { struct iphdr *iph = ip_hdr(skb); + u16 csum = (__force u16)(~iph->check) - + (__force u16)iph->tot_len - + (__force u16)iph->frag_off; + pbd2->fw_ip_csum_wo_len_flags_frag = - bswab16(csum_fold((~iph->check) - - iph->tot_len - iph->frag_off)); + bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); @@ -3586,7 +3636,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ - /* enable this debug print to view the tranmission details + /* enable this debug print to view the transmission details DP(NETIF_MSG_TX_QUEUED, "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", txdata->cid, fp_index, txdata_index, txdata, fp); */ @@ -3968,7 +4018,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - /* no traffic classes requested. aborting */ + /* no traffic classes requested. Aborting */ if (!num_tc) { netdev_reset_tc(dev); return 0; @@ -3976,7 +4026,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* requested to support too many traffic classes */ if (num_tc > bp->max_cos) { - BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n", + BNX2X_ERR("support for too many traffic classes requested: %d. 
Max supported is %d\n", num_tc, bp->max_cos); return -EINVAL; } @@ -3995,8 +4045,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) prio, bp->prio_to_cos[prio]); } - - /* Use this configuration to diffrentiate tc0 from other COSes + /* Use this configuration to differentiate tc0 from other COSes This can be used for ets or pfc, and save the effort of setting up a multio class queue disc or negotiating DCBX with a switch netdev_set_prio_tc_map(dev, 0, 0); @@ -4288,10 +4337,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) &bnx2x_fp(bp, index, rx_desc_mapping), sizeof(struct eth_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + /* Seed all CQEs by 1s */ + BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), + &bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); /* SGE ring */ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), @@ -4472,7 +4522,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp) alloc_err: bnx2x_free_mem_bp(bp); return -ENOMEM; - } int bnx2x_reload_if_running(struct net_device *dev) @@ -4514,7 +4563,6 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp) } return sel_phy_idx; - } int bnx2x_get_link_cfg_idx(struct bnx2x *bp) { @@ -4602,6 +4650,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; + u32 changes; bool bnx2x_reload = false; if (features & NETIF_F_LRO) @@ -4626,10 +4675,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) } } - if (flags ^ bp->flags) { - bp->flags = flags; + changes = flags ^ bp->flags; + + /* if GRO is changed while LRO is enabled, don't force a reload */ + if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) + changes &= ~GRO_ENABLE_FLAG; + + if (changes) bnx2x_reload = true; - } + + bp->flags = flags; if (bnx2x_reload) { if (bp->recovery_state == BNX2X_RECOVERY_DONE) @@ -4724,7 +4779,6 @@ int bnx2x_resume(struct pci_dev *pdev) return rc; } - void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { @@ -4742,7 +4796,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, u8 fw_sb_id, u8 sb_index, u8 ticks) { - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); REG_WR8(bp, addr, ticks); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 151675d66b0d..c07a6d054cfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -22,7 +22,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> - #include "bnx2x.h" #include "bnx2x_sriov.h" @@ -50,13 +49,25 @@ extern int int_mode; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ -do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO); \ - if (x == NULL) \ - goto alloc_mem_err; \ -} while (0) +#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ + GFP_KERNEL | __GFP_ZERO); \ + if (x == NULL) \ + goto alloc_mem_err; \ + DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } while (0) + +#define BNX2X_PCI_FALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0xFFFFFFFF, size); \ 
+ DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ + (unsigned long long)(*y), x); \ + } while (0) #define BNX2X_ALLOC(x, size) \ do { \ @@ -494,9 +505,6 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); /* Error handling */ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); -/* validate currect fw is loaded */ -bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); - /* dev_close main block */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); @@ -607,6 +615,13 @@ int bnx2x_enable_msi(struct bnx2x *bp); int bnx2x_poll(struct napi_struct *napi, int budget); /** + * bnx2x_low_latency_recv - LL callback + * + * @napi: napi structure + */ +int bnx2x_low_latency_recv(struct napi_struct *napi); + +/** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * * @bp: driver handle @@ -800,16 +815,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) return false; } +#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0) +#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF) static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) { - u16 rx_cons_sb; + u16 cons; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); + cons = RCQ_BD(fp->rx_comp_cons); + cqe = &fp->rx_comp_ring[cons]; + cqe_fp = &cqe->fast_path_cqe; + return BNX2X_IS_CQE_COMPLETED(cqe_fp); } /** @@ -848,9 +865,11 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_add_all_napi(struct bnx2x *bp) @@ -858,25 +877,31 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } int bnx2x_set_int_mode(struct bnx2x *bp); @@ -1171,7 +1196,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) { - /* the 'first' id is allocated for the cnic */ return bp->base_fw_ndsb; } @@ -1181,7 +1205,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) return bp->igu_base_sb; } - static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); @@ -1334,8 +1357,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); /* - * 1. number of frags should not grow above MAX_SKB_FRAGS - * 2. frag must fit the page + * 1. Number of frags should not grow above MAX_SKB_FRAGS + * 2. 
Frag must fit the page */ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 4b077a7f16af..0c94df47e0e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -253,7 +253,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, memset(&pg_help_data, 0, sizeof(struct pg_help_data)); - if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); @@ -298,7 +297,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, struct dcbx_pfc_feature *pfc, u32 error) { - if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); @@ -367,7 +365,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, struct lldp_remote_mib *remote_mib ; struct lldp_local_mib *local_mib; - switch (read_mib_type) { case DCBX_READ_LOCAL_MIB: mib_size = sizeof(struct lldp_local_mib); @@ -629,7 +626,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) return 0; } - #ifdef BCM_DCBNL static inline u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) @@ -691,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) } /* setup tc must be called under rtnl lock, but we can't take it here - * as we are handling an attetntion on a work queue which must be + * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. if down) */ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) @@ -711,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) */ bnx2x_dcbnl_update_applist(bp, true); - /* Read rmeote mib if dcbx is in the FW */ + /* Read remote mib if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_remote_mib(bp)) return; #endif @@ -742,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_tc_mapping(bp); /* - * allow other funtions to update their netdevices + * allow other functions to update their netdevices * accordingly */ if (IS_MF(bp)) @@ -864,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); } - /*For IEEE admin_recommendation_bw_precentage + /*For IEEE admin_recommendation_bw_percentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { @@ -896,13 +892,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, } af->app.default_pri = (u8)dp->admin_default_priority; - } /* Write the data. 
*/ bnx2x_write_data(bp, (u32 *)&admin_mib, offset, sizeof(struct lldp_admin_mib)); - } void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) @@ -1076,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bool pg_found = false; u32 i, traf_type, add_traf_type, add_pg; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + struct pg_entry_help_data *data = help_data->data; /*shortcut*/ /* Set to invalid */ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) @@ -1172,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); else /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[entry].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1181,7 +1176,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, BNX2X_ERR("dcbx error: Both groups must have priorities\n"); } - #ifndef POWER_OF_2 #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) #endif @@ -1284,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } else { /* If there are only pauseable priorities or * only non-pauseable,* the lower priorities go - * to the first queue and the higherpriorities go + * to the first queue and the higher priorities go * to the second queue. */ cos_data->data[0].pausable = @@ -1484,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( * queue and one priority goes to the second queue. * * We will join this two cases: - * if one is BW limited it will go to the secoend queue + * if one is BW limited it will go to the second queue * otherwise the last priority will get it */ @@ -1504,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( false == b_found_strict) /* last entry will be handled separately * If no priority is strict than last - * enty goes to last queue.*/ + * entry goes to last queue. + */ entry = 1; cos_data->data[entry].pri_join_mask |= pri_tested; @@ -1516,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( b_found_strict = true; cos_data->data[1].pri_join_mask |= pri_tested; /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1524,7 +1520,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( } } - static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, @@ -1533,7 +1528,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, u32 pri_join_mask, u8 num_of_dif_pri) { - /* default E2 settings */ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; @@ -1629,7 +1623,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, u8 num_spread_of_entries, u8 strict_app_pris) { - if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, num_spread_of_entries, strict_app_pris)) { @@ -1848,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { - /* if we need to syncronize DCBX result from prev PMF + /* if we need to synchronize DCBX result from prev PMF * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && @@ -1876,7 +1869,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) * dcbx negotiation. 
*/ bnx2x_dcbx_update_tc_mapping(bp); - } } @@ -1943,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, return; /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -1995,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, DP(BNX2X_MSG_DCB, "prio = %d\n", prio); /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -2389,7 +2381,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, *flags |= DCB_FEATCFG_ERROR; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } @@ -2430,7 +2422,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, flags & DCB_FEATCFG_WILLING ? 1 : 0; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index d153f44cf8f9..125bd1b6586f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -134,8 +134,6 @@ enum { #define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 #define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 - - struct cos_entry_help_data { u32 pri_join_mask; u32 cos_bw; @@ -170,7 +168,6 @@ struct cos_help_data { (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) - struct pg_entry_help_data { u8 num_of_dif_pri; u8 pg; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index bff5e33eaa14..12eb4baee9f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -13,12 +13,6 @@ * consent. 
*/ - -/* This struct holds a signature to ensure the dump returned from the driver - * match the meta data file inserted to grc_dump.tcl - * The signature is time stamp, diag version and grc_dump version - */ - #ifndef BNX2X_DUMP_H #define BNX2X_DUMP_H @@ -28,7 +22,6 @@ #define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 #define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 - /* Possible Chips */ #define DUMP_CHIP_E1 1 #define DUMP_CHIP_E1H 2 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ce1a91618677..c5f225101684 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); - /* If recieved a request for an unknown duplex, assume full*/ + /* If received a request for an unknown duplex, assume full*/ if (cmd->duplex == DUPLEX_UNKNOWN) cmd->duplex = DUPLEX_FULL; @@ -733,7 +733,6 @@ static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, return false; } - static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, const struct wreg_addr *wreg_info) { @@ -850,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { - /* Read "paged" registes */ + /* Read "paged" registers */ bnx2x_read_pages_regs(bp, p, preset); } @@ -960,6 +959,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) struct bnx2x *bp = netdev_priv(dev); /* Use the ethtool_dump "flag" field as the dump preset index */ + if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS) + return -EINVAL; + bp->dump_preset_idx = val->flag; return 0; } @@ -969,12 +971,12 @@ static int bnx2x_get_dump_flag(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + dump->version = BNX2X_DUMP_VERSION; + dump->flag = bp->dump_preset_idx; /* Calculate the requested preset idx length */ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", bp->dump_preset_idx, dump->len); - - dump->flag = ETHTOOL_GET_DUMP_DATA; return 0; } @@ -986,8 +988,6 @@ static int bnx2x_get_dump_data(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); struct dump_header dump_hdr = {0}; - memset(p, 0, dump->len); - /* Disable parity attentions as long as following dump may * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. @@ -1155,8 +1155,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) return bp->common.flash_size; } -/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had - * we done things the other way around, if two pfs from the same port would +/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, + * had we done things the other way around, if two pfs from the same port would * attempt to access nvram at the same time, we could run into a scenario such * as: * pf A takes the port lock. 
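The nvram comment rewritten just above states a lock-ordering rule: the per-pf misc lock must always be taken before the per-port mcp lock. A toy illustration of that discipline, with pthread mutexes standing in for the hardware lock registers (all names invented):

#include <pthread.h>
#include <stdio.h>

/* toy stand-ins for the per-pf misc lock and per-port mcp lock */
static pthread_mutex_t misc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mcp_lock  = PTHREAD_MUTEX_INITIALIZER;

/* the one legal order: misc first, mcp second, release in reverse */
static void nvram_access(int pf)
{
	pthread_mutex_lock(&misc_lock);
	pthread_mutex_lock(&mcp_lock);
	printf("pf %d: eeprom access\n", pf);
	pthread_mutex_unlock(&mcp_lock);
	pthread_mutex_unlock(&misc_lock);
}

/*
 * If some other path took mcp_lock first and misc_lock second, two pfs
 * on the same port could each hold one lock and block on the other --
 * the classic AB/BA deadlock the comment's scenario walks through.
 */

int main(void)
{
	nvram_access(0);
	nvram_access(1);
	return 0;
}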
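Stepping back to the completion-queue rework earlier in this patch: bnx2x_rx_int() no longer reads a producer index from the status block (the old hw_comp_cons plus rmb()). Instead a marker field is carved out of eth_fast_path_rx_cqe's reserved space, BNX2X_PCI_FALLOC seeds the whole ring with 0xff bytes (memset truncates its value argument to an unsigned char, so the 0xFFFFFFFF it is passed writes the same 0xff fill), the device clears the marker when it writes a CQE, and BNX2X_SEED_CQE re-seeds the marker once the entry is consumed. A single-threaded toy model of that scheme — ring size and field layout are illustrative only, and DMA/write ordering concerns are deliberately elided:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

struct cqe {
	uint32_t data;   /* stand-in for the real descriptor fields */
	uint32_t marker; /* 0xffffffff = free, 0x0 = completed */
};

static struct cqe ring[RING_SIZE];

static bool cqe_completed(const struct cqe *c)
{
	return c->marker == 0x0;
}

/* "device" side: fill the entry, then clear the marker last so a
 * zeroed marker implies the rest of the CQE is already valid */
static void hw_complete(unsigned int prod, uint32_t data)
{
	ring[prod % RING_SIZE].data = data;
	ring[prod % RING_SIZE].marker = 0x0;
}

int main(void)
{
	unsigned int cons = 0, prod = 0;

	/* seed everything to 1s, as BNX2X_PCI_FALLOC does at alloc */
	memset(ring, 0xff, sizeof(ring));

	hw_complete(prod++, 42);
	hw_complete(prod++, 43);

	/* consumer: no producer index to compare against -- the
	 * marker itself says "done" */
	while (cqe_completed(&ring[cons % RING_SIZE])) {
		printf("cqe %u: data %u\n", cons,
		       (unsigned)ring[cons % RING_SIZE].data);
		ring[cons % RING_SIZE].marker = 0xffffffff; /* re-seed */
		cons++;
	}
	return 0;
}

In the real driver memory barriers still matter between the device's DMA writes and the CPU's reads; the point of the change is only that completion detection becomes a property of the CQE itself rather than of a separately fetched index.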
@@ -1381,12 +1381,29 @@ static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, return rc; } +static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) +{ + int rc = 1; + u16 pm = 0; + struct net_device *dev = pci_get_drvdata(bp->pdev); + + if (bp->pm_cap) + rc = pci_read_config_word(bp->pdev, + bp->pm_cap + PCI_PM_CTRL, &pm); + + if ((rc && !netif_running(dev)) || + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) + return false; + + return true; +} + static int bnx2x_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *eebuf) { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1411,7 +1428,7 @@ static int bnx2x_get_module_eeprom(struct net_device *dev, u8 *user_data = data; unsigned int start_addr = ee->offset, xfer_size = 0; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1474,7 +1491,7 @@ static int bnx2x_get_module_info(struct net_device *dev, int phy_idx, rc; u8 sff8472_comp, diag_type; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1594,8 +1611,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, */ val = be32_to_cpu(val_be); - val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset)); - val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset)); + val &= ~le32_to_cpu((__force __le32) + (0xff << BYTE_OFFSET(offset))); + val |= le32_to_cpu((__force __le32) + (*data_buf << BYTE_OFFSET(offset))); rc = bnx2x_nvram_write_dword(bp, align_offset, val, cmd_flags); @@ -1676,7 +1695,8 @@ static int bnx2x_set_eeprom(struct net_device *dev, int port = BP_PORT(bp); int rc = 0; u32 ext_phy_config; - if (!netif_running(dev)) { + + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1921,6 +1941,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { "link_test (online) " }; +enum { + BNX2X_PRI_FLAG_ISCSI, + BNX2X_PRI_FLAG_FCOE, + BNX2X_PRI_FLAG_STORAGE, + BNX2X_PRI_FLAG_LEN, +}; + +static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "iSCSI offload support", + "FCoE offload support", + "Storage only interface" +}; + static u32 bnx2x_eee_to_adv(u32 eee_adv) { u32 modes = 0; @@ -2041,7 +2074,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME; - /* Restart link to propogate changes */ + /* Restart link to propagate changes */ if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_force_link_reset(bp); @@ -2160,7 +2193,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -2264,7 +2297,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) { NULL, 0xffffffff, {0, 0, 0, 0} } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -2978,32 
+3011,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp) static int bnx2x_get_sset_count(struct net_device *dev, int stringset) { struct bnx2x *bp = netdev_priv(dev); - int i, num_stats; + int i, num_strings = 0; switch (stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; + num_strings = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; } else - num_stats = 0; + num_strings = 0; if (IS_MF_MODE_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) - num_stats++; + num_strings++; } else - num_stats += BNX2X_NUM_STATS; + num_strings += BNX2X_NUM_STATS; - return num_stats; + return num_strings; case ETH_SS_TEST: return BNX2X_NUM_TESTS(bp); + case ETH_SS_PRIV_FLAGS: + return BNX2X_PRI_FLAG_LEN; + default: return -EINVAL; } } +static u32 bnx2x_get_private_flags(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = 0; + + flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; + flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE; + flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; + + return flags; +} + static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); @@ -3026,7 +3074,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) continue; @@ -3045,6 +3092,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) start = 4; memcpy(buf, bnx2x_tests_str_arr + start, ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(buf, bnx2x_private_arr, + ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); + break; } } @@ -3106,17 +3159,12 @@ static int bnx2x_set_phys_id(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; } - if (!bp->port.pmf) { - DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n"); - return -EOPNOTSUPP; - } - switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ @@ -3148,7 +3196,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) { - switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: @@ -3384,7 +3431,6 @@ static int bnx2x_set_channels(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - DP(BNX2X_MSG_ETHTOOL, "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, @@ -3445,6 +3491,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_pauseparam = bnx2x_set_pauseparam, .self_test = bnx2x_self_test, .get_sset_count = bnx2x_get_sset_count, + .get_priv_flags = bnx2x_get_private_flags, .get_strings = bnx2x_get_strings, .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 12f00a40cdf0..5018e52ae2ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1323,6 +1323,8 @@ struct drv_func_mb { #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + 
#define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 @@ -3816,7 +3818,8 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[8]; + __le32 reserved1[7]; + u32 marker; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b4c9dea93a53..15a528bda87c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -93,7 +93,6 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); - int num_queues; module_param(num_queues, int, 0); MODULE_PARM_DESC(num_queues, @@ -103,8 +102,6 @@ static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -#define INT_MODE_INTx 1 -#define INT_MODE_MSI 2 int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " @@ -122,8 +119,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); - - struct workqueue_struct *bnx2x_wq; struct bnx2x_mac_vals { @@ -376,9 +371,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) +static void bnx2x_dp_dmae(struct bnx2x *bp, + struct dmae_command *dmae, int msglvl) { u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + int i; switch (dmae->opcode & DMAE_COMMAND_DST) { case DMAE_CMD_DST_PCI: @@ -434,6 +431,10 @@ void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) dmae->comp_val); break; } + + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) + DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", + i, *(((u32 *)dmae) + i)); } /* copy command into DMAE command memory and set DMAE command go */ @@ -508,8 +509,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; int rc = 0; - /* - * Lock the dmae channel. Disable BHs to prevent a dead-lock + bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); + + /* Lock the dmae channel. Disable BHs to prevent a dead-lock * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ @@ -548,6 +550,7 @@ unlock: void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -571,11 +574,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + } } void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -603,7 +611,11 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + } } static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, @@ -811,8 +823,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) u32 val = REG_RD(bp, addr); /* in E1 we must use only PCI configuration space to disable - * MSI/MSIX capablility - * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + * MSI/MSIX capability + * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block */ if (CHIP_IS_E1(bp)) { /* Since IGU_PF_CONF_MSI_MSIX_EN still always on @@ -839,7 +851,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_igu_int_disable(struct bnx2x *bp) @@ -857,7 +869,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_int_disable(struct bnx2x *bp) @@ -917,7 +929,6 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) sp_sb_data.p_func.vf_valid, sp_sb_data.state); - for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; int loop; @@ -1016,7 +1027,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) hc_sm_p[j].timer_value); } - /* Indecies data */ + /* Indices data */ for (j = 0; j < loop; j++) { pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, hc_index_p[j].flags, @@ -1027,6 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) #ifdef BNX2X_STOP_ON_ERROR /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); for (i = 0; i < NUM_EQ_DESC; i++) { u32 *data = (u32 *)&bp->eq_ring[i].message.data; @@ -1111,7 +1123,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW * initialization. 
*/ -#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ #define FLR_WAIT_INTERVAL 50 /* usec */ #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ @@ -1290,7 +1302,6 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); - /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); @@ -1305,11 +1316,9 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) - int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { u32 op_gen_command = 0; - u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); int ret = 0; @@ -1334,7 +1343,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) bnx2x_panic(); return 1; } - /* Zero completion for nxt FLR */ + /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); return ret; @@ -1352,7 +1361,6 @@ u8 bnx2x_is_pcie_pending(struct pci_dev *dev) */ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) { - /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, CFC_REG_NUM_LCIDS_INSIDE_PF, @@ -1360,7 +1368,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) poll_cnt)) return 1; - /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_PF_USAGE_CNT, @@ -1390,7 +1397,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) /* Wait DMAE PF usage counter to zero */ if (bnx2x_flr_clnup_poll_hw_counter(bp, dmae_reg_go_c[INIT_DMAE_C(bp)], - "DMAE dommand register timed out", + "DMAE command register timed out", poll_cnt)) return 1; @@ -1770,7 +1777,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TERMINATE): - DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); + DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_TERMINATE; break; @@ -1859,7 +1866,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); if (status & mask) { /* Handle Rx or Tx according to SB id */ - prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); @@ -1947,7 +1953,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return 0; - msleep(5); + usleep_range(5000, 10000); } BNX2X_ERR("Timeout\n"); return -EAGAIN; @@ -1982,8 +1988,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n", - lock_status, resource_bit); + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. 
Unlock was called but lock wasn't taken!\n", + lock_status, resource_bit); return -EFAULT; } @@ -1991,7 +1997,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) return 0; } - int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) { /* The GPIO should be swapped if swap register is set and active */ @@ -2347,14 +2352,13 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) return rc; } - /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. Returns: sum of vn_min_rates. or 0 - if all the min_rates are 0. - In the later case fainess algorithm should be deactivated. + In the later case fairness algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ static void bnx2x_calc_vn_min(struct bnx2x *bp, @@ -2419,7 +2423,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, input->vnic_max_rate[vn] = vn_max_rate; } - static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) { if (CHIP_REV_IS_SLOW(bp)) @@ -2435,7 +2438,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); if (BP_NOMCP(bp)) - return; /* what should be the default bvalue in this case */ + return; /* what should be the default value in this case */ /* For 2 port configuration the absolute function number formula * is: @@ -2901,7 +2904,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } - static void storm_memset_func_cfg(struct bnx2x *bp, struct tstorm_eth_function_common_config *tcfg, u16 abs_fid) @@ -2935,7 +2937,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) } /** - * bnx2x_get_tx_only_flags - Return common flags + * bnx2x_get_common_flags - Return common flags * * @bp device handle * @fp queue handle @@ -3006,7 +3008,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, if (IS_MF_AFEX(bp)) __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); - return flags | bnx2x_get_common_flags(bp, fp, true); } @@ -3082,7 +3083,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, * placed on the BD (not including paddings). */ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; @@ -3124,7 +3125,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, txq_init->fw_sb_id = fp->fw_sb_id; /* - * set the tss leading client id for TX classfication == + * set the tss leading client id for TX classification == * leading RSS client id */ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -3196,7 +3197,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); } - static void bnx2x_e1h_disable(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -3212,7 +3212,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); /* @@ -3540,10 +3540,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) return true; else return false; - } - /** * bnx2x_sp_post - place a single command on an SP ring * @@ -3608,14 +3606,13 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, /* * It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. 
+ * more explicit memory barrier is needed. */ if (common) atomic_dec(&bp->eq_spq_left); else atomic_dec(&bp->cq_spq_left); - DP(BNX2X_MSG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), @@ -3637,15 +3634,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) might_sleep(); for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); + val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); + if (val & MCPR_ACCESS_LOCK_LOCK) break; - msleep(5); + usleep_range(5000, 10000); } - if (!(val & (1L << 31))) { + if (!(val & MCPR_ACCESS_LOCK_LOCK)) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); rc = -EBUSY; } @@ -3656,7 +3652,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) /* release split MCP access lock register */ static void bnx2x_release_alr(struct bnx2x *bp) { - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); } #define BNX2X_DEF_SB_ATT_IDX 0x0001 @@ -3678,7 +3674,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) rc |= BNX2X_DEF_SB_IDX; } - /* Do not reorder: indecies reading should complete before handling */ + /* Do not reorder: indices reading should complete before handling */ barrier(); return rc; } @@ -3827,8 +3823,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" "Please contact OEM Support for assistance\n"); - /* - * Schedule device reset (unload) + /* Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ @@ -3836,7 +3831,6 @@ static void bnx2x_fan_failure(struct bnx2x *bp) set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); smp_mb__after_clear_bit(); schedule_delayed_work(&bp->sp_rtnl_task, 0); - } static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -4106,7 +4100,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp) */ static bool bnx2x_reset_is_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; @@ -4157,7 +4151,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp) */ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); u32 bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; @@ -4260,13 +4254,18 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } +static void _print_parity(struct bnx2x *bp, u32 reg) +{ + pr_cont(" [0x%08x] ", REG_RD(bp, reg)); +} + static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? 
", " : "", blk); } -static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4275,33 +4274,54 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "BRB"); + _print_parity(bp, + BRB1_REG_BRB1_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PARSER"); + _print_parity(bp, PRS_REG_PRS_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSDM"); + _print_parity(bp, + TSDM_REG_TSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "SEARCHER"); + _print_parity(bp, SRC_REG_SRC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TCM"); + _print_parity(bp, + TCM_REG_TCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSEMI"); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_0); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XPB"); + _print_parity(bp, GRCBASE_XPB + + PB_REG_PB_PRTY_STS); + } break; } @@ -4313,8 +4333,9 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) +static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int par_num, bool *global, + bool print) { int i = 0; u32 cur_bit = 0; @@ -4323,37 +4344,66 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PBF"); + _print_parity(bp, PBF_REG_PBF_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "QM"); + _print_parity(bp, QM_REG_QM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TM"); + _print_parity(bp, TM_REG_TM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSDM"); + _print_parity(bp, + XSDM_REG_XSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XCM"); + _print_parity(bp, XCM_REG_XCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSEMI"); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_0); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DOORBELLQ"); + _print_parity(bp, + DORQ_REG_DORQ_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "NIG"); + if (CHIP_IS_E1x(bp)) { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS); + } else { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_0); + _print_parity(bp, + 
NIG_REG_NIG_PRTY_STS_1); + } + } break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) @@ -4362,32 +4412,52 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DEBUG"); + _print_parity(bp, DBG_REG_DBG_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USDM"); + _print_parity(bp, + USDM_REG_USDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UCM"); + _print_parity(bp, UCM_REG_UCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USEMI"); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_0); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UPB"); + _print_parity(bp, GRCBASE_UPB + + PB_REG_PB_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSDM"); + _print_parity(bp, + CSDM_REG_CSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CCM"); + _print_parity(bp, CCM_REG_CCM_PRTY_STS); + } break; } @@ -4399,8 +4469,8 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4409,12 +4479,23 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSEMI"); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_0); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PXP"); + _print_parity(bp, PXP_REG_PXP_PRTY_STS); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_0); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_1); + } break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) @@ -4422,24 +4503,42 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CFC"); + _print_parity(bp, + CFC_REG_CFC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CDU"); + _print_parity(bp, CDU_REG_CDU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DMAE"); + _print_parity(bp, + DMAE_REG_DMAE_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "IGU"); + if (CHIP_IS_E1x(bp)) + _print_parity(bp, + HC_REG_HC_PRTY_STS); + else + _print_parity(bp, + IGU_REG_IGU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "MISC"); + _print_parity(bp, + MISC_REG_MISC_PRTY_STS); + } break; } @@ -4493,8 +4592,8 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 
return par_num; } -static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4503,12 +4602,18 @@ static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PGLUE_B"); + _print_parity(bp, + PGLUE_B_REG_PGLUE_B_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "ATC"); + _print_parity(bp, + ATC_REG_ATC_PRTY_STS); + } break; } @@ -4539,15 +4644,15 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0( + par_num = bnx2x_check_blocks_with_parity0(bp, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1( + par_num = bnx2x_check_blocks_with_parity1(bp, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2( + par_num = bnx2x_check_blocks_with_parity2(bp, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bnx2x_check_blocks_with_parity3( sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4( + par_num = bnx2x_check_blocks_with_parity4(bp, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) @@ -4591,7 +4696,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) return bnx2x_parity_attn(bp, global, print, attn.sig); } - static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) { u32 val; @@ -4643,7 +4747,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); } - } static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) @@ -4878,7 +4981,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, BNX2X_ERR("Failed to schedule new commands: %d\n", rc); else if (rc > 0) DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); - } static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); @@ -5009,7 +5111,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) hw_cons = le16_to_cpu(*bp->eq_cons_sb); /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. - * when we get the the next-page we nned to adjust so the loop + * when we get the next-page we need to adjust so the loop * condition below will be met. 
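The parity walkers in the hunks above gain a struct bnx2x pointer so that, next to each block's name, the matching parity status register can be dumped through the new _print_parity() helper. A minimal user-space model of that walk (the attention bits, block names and register indices here are invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct dev_stub { const uint32_t *regs; };      /* stand-in for struct bnx2x */

static uint32_t reg_rd(struct dev_stub *bp, uint32_t off)
{
        return bp->regs[off];                   /* stand-in for REG_RD() */
}

static void print_parity(struct dev_stub *bp, uint32_t sts_reg)
{
        printf(" [0x%08x] ", reg_rd(bp, sts_reg));
}

static int check_blocks(struct dev_stub *bp, uint32_t sig, int par_num)
{
        static const struct {
                uint32_t bit;                   /* AEU attention bit (invented) */
                const char *name;               /* block name */
                uint32_t sts;                   /* parity status reg (invented) */
        } blk[] = {
                { 1u << 0, "BRB",    0 },
                { 1u << 1, "PARSER", 1 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(blk) / sizeof(blk[0]); i++) {
                if (!(sig & blk[i].bit))
                        continue;
                /* same idiom as _print_next_block() + _print_parity() */
                printf("%s%s", par_num++ ? ", " : "", blk[i].name);
                print_parity(bp, blk[i].sts);
        }
        return par_num;         /* running count, threaded between walkers */
}

int main(void)
{
        const uint32_t regs[2] = { 0x00000004, 0x00000000 };
        struct dev_stub bp = { regs };

        check_blocks(&bp, 0x3, 0);
        putchar('\n');
        return 0;
}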
The next element is the size of a * regular element and hence incrementing by 1 */ @@ -5075,8 +5177,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) break; - - goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: @@ -5218,7 +5318,7 @@ static void bnx2x_sp_task(struct work_struct *work) DP(BNX2X_MSG_SP, "sp task invoked\n"); - /* make sure the atomic interupt_occurred has been written */ + /* make sure the atomic interrupt_occurred has been written */ smp_rmb(); if (atomic_read(&bp->interrupt_occurred)) { @@ -5265,7 +5365,6 @@ static void bnx2x_sp_task(struct work_struct *work) /* ack status block only if something was actually handled */ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); - } /* must be called after the EQ processing (since eq leads to sriov @@ -5316,7 +5415,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* end of slow path */ - void bnx2x_drv_pulse(struct bnx2x *bp) { SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, @@ -5360,7 +5458,7 @@ static void bnx2x_timer(unsigned long data) /* sample pf vf bulletin board for new posts from pf */ if (IS_VF(bp)) - bnx2x_sample_bulletin(bp); + bnx2x_timer_sriov(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -5382,7 +5480,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) else for (i = 0; i < len; i++) REG_WR8(bp, addr + i, fill); - } /* helper: writes FP SP data to FW - data_size in dwords */ @@ -5461,10 +5558,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp) bnx2x_fill(bp, BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, CSTORM_SP_SYNC_BLOCK_SIZE); - } - static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { @@ -5474,7 +5569,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, hc_sm->time_to_expire = 0xFFFFFFFF; } - /* allocates state machine ids. */ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) { @@ -5700,7 +5794,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) bp->eq_cons = 0; bp->eq_prod = NUM_EQ_DESC; bp->eq_cons_sb = BNX2X_EQ_INDEX; - /* we want a warning message before it gets rought... */ + /* we want a warning message before it gets wrought... */ atomic_set(&bp->eq_spq_left, min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } @@ -5784,7 +5878,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, break; case BNX2X_RX_MODE_PROMISC: - /* According to deffinition of SI mode, iface in promisc mode + /* According to definition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. 
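A recurring conversion in this patch (bnx2x_acquire_hw_lock and bnx2x_acquire_alr above, bnx2x_int_mem_test below) replaces msleep() in short poll loops with usleep_range(): an msleep() of a few milliseconds can round up to whole jiffies on a low-HZ kernel, while usleep_range() gives the scheduler a bounded hrtimer window. The common shape of the converted loops, as a kernel-context sketch (STATUS_REG and STATUS_READY are invented placeholders for whichever register is polled):

/* kernel-context sketch, not standalone */
static int poll_status(struct bnx2x *bp)
{
        int count = 1000;
        u32 val;

        do {
                val = REG_RD(bp, STATUS_REG);
                if (val == STATUS_READY)
                        return 0;
                /* was msleep(10): may round up to whole jiffies */
                usleep_range(10000, 20000);
        } while (--count);

        return -EBUSY;
}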
*/ @@ -5927,7 +6021,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init shortcut */ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); - /* Setup SB indicies */ + /* Setup SB indices */ fp->rx_cons_sb = BNX2X_RX_SB_INDEX; /* Configure Queue State object */ @@ -5983,6 +6077,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); } + *txdata->tx_cons_sb = cpu_to_le16(0); + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); txdata->tx_db.data.zero_fill1 = 0; txdata->tx_db.data.prod = 0; @@ -6001,6 +6097,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) for_each_tx_queue_cnic(bp, i) bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); } + static void bnx2x_init_tx_rings(struct bnx2x *bp) { int i; @@ -6043,11 +6140,6 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - if (IS_VF(bp)) { - bnx2x_memset_stats(bp); - return; - } - if (IS_PF(bp)) { /* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, @@ -6058,6 +6150,8 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_def_sb(bp); bnx2x_update_dsb_idx(bp); bnx2x_init_sp_ring(bp); + } else { + bnx2x_memset_stats(bp); } } @@ -6236,7 +6330,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0x10) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x10) { @@ -6251,7 +6345,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 1) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x1) { @@ -6292,7 +6386,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0xb0) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0xb0) { @@ -6681,7 +6775,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry - * to the last enrty in the ILT. + * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. @@ -6772,7 +6866,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - /* QM queues pointers table */ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); @@ -7013,7 +7106,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) u32 low, high; u32 val; - DP(NETIF_MSG_HW, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); @@ -7078,7 +7170,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0), 40); - bnx2x_init_block(bp, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(bp)) { if (IS_MF_AFEX(bp)) { @@ -7150,8 +7241,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use + * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(bp) ? 
0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ @@ -7275,7 +7366,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) msleep(20); - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", @@ -7295,7 +7385,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) bnx2x_ilt_wr(bp, i, 0); } - static void bnx2x_init_searcher(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -7331,7 +7420,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp) int rc, i, port = BP_PORT(bp); int vlan_en = 0, mac_en[NUM_MACS]; - /* Close input from network */ if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_set_rx_filter(&bp->link_params, 0); @@ -7406,7 +7494,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) bnx2x_ilt_init_op_cnic(bp, INITOP_SET); if (CONFIGURE_NIC_MODE(bp)) { - /* Configrue searcher as part of function hw init */ + /* Configure searcher as part of function hw init */ bnx2x_init_searcher(bp); /* Reset NIC mode */ @@ -7479,8 +7567,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } else { /* Set NIC mode */ REG_WR(bp, PRS_REG_NIC_MODE, 1); - DP(NETIF_MSG_IFUP, "NIC MODE configrued\n"); - + DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); } if (!CHIP_IS_E1x(bp)) { @@ -7677,7 +7764,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); - /* !!! these should become driver const once + /* !!! These should become driver const once rf-tool supports split-68 const */ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); @@ -7734,7 +7821,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) return 0; } - void bnx2x_free_mem_cnic(struct bnx2x *bp) { bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); @@ -7779,7 +7865,6 @@ void bnx2x_free_mem(struct bnx2x *bp) bnx2x_iov_free_mem(bp); } - int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { if (!CHIP_IS_E1x(bp)) @@ -7793,7 +7878,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp) host_hc_status_block_e1x)); if (CONFIGURE_NIC_MODE(bp) && !bp->t2) - /* allocate searcher T2 table, as it wan't allocated before */ + /* allocate searcher T2 table, as it wasn't allocated before */ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); /* write address to which L5 should insert its values */ @@ -8068,7 +8153,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); - } if (CNIC_SUPPORT(bp)) { @@ -8124,7 +8208,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) static void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { - u8 cos; int cxt_index, cxt_offset; @@ -8133,7 +8216,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); - /* If HC is supporterd, enable host coalescing in the transition + /* If HC is supported, enable host coalescing in the transition * to INIT state. */ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); @@ -8205,7 +8288,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, return bnx2x_queue_state_change(bp, q_params); } - /** * bnx2x_setup_queue - setup queue * @@ -8254,7 +8336,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, DP(NETIF_MSG_IFUP, "init complete\n"); - /* Now move the Queue to the SETUP state... 
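bnx2x_setup_queue() above issues the INIT ramrod first and only then moves the queue to SETUP; bnx2x_queue_state_change() enforces that each command is legal from exactly one predecessor state. A toy model of why the order matters:

#include <stdio.h>

enum q_state { Q_RESET, Q_INIT, Q_SETUP };
enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP };

/* toy version of the state-change check */
static int queue_state_change(enum q_state *st, enum q_cmd cmd)
{
        if (cmd == Q_CMD_INIT && *st == Q_RESET) {
                *st = Q_INIT;
                return 0;
        }
        if (cmd == Q_CMD_SETUP && *st == Q_INIT) {
                *st = Q_SETUP;
                return 0;
        }
        return -1;                      /* illegal transition */
}

int main(void)
{
        enum q_state st = Q_RESET;

        printf("SETUP before INIT: %d\n", queue_state_change(&st, Q_CMD_SETUP));
        printf("INIT:              %d\n", queue_state_change(&st, Q_CMD_INIT));
        printf("then SETUP:        %d\n", queue_state_change(&st, Q_CMD_SETUP));
        return 0;
}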
*/ memset(setup_params, 0, sizeof(*setup_params)); @@ -8315,7 +8396,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - /* close tx-only connections */ for (tx_index = FIRST_TX_ONLY_COS_INDEX; tx_index < fp->max_cos; @@ -8369,7 +8449,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) return bnx2x_queue_state_change(bp, &q_params); } - static void bnx2x_reset_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -8422,7 +8501,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) * scan to complete */ for (i = 0; i < 200; i++) { - msleep(10); + usleep_range(10000, 20000); if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) break; } @@ -8623,14 +8702,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB - * 2. Sync SP queue - this guarantes us that attention handling started - * 3. Wait, that TXdisable/enable transaction completes + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes * - * 1+2 guranty that if DCBx attention was scheduled it already changed - * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy - * received complettion for the transaction the state is TX_STOPPED. + * 1+2 guarantee that if DCBx attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ @@ -8660,7 +8739,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -8740,7 +8819,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_iov_chip_cleanup(bp); - /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNC, PORT or COMMON HW @@ -8750,7 +8828,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction */ rc = bnx2x_func_wait_started(bp); if (rc) { @@ -8813,7 +8891,6 @@ unload_error: if (rc) BNX2X_ERR("HW_RESET failed\n"); - /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, keep_link); } @@ -9179,7 +9256,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) return -EAGAIN; - /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ @@ -9367,7 +9443,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise - * the the gates will remain closed for that + * the gates will remain closed for that * engine. 
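bnx2x_func_wait_started() above boils down to polling until the pending TX disable/enable transaction leaves the TX_STOPPED state or a budget expires. A user-space model of that loop (the budget, step and state flip are simulated; the driver polls the function object and sleeps ~100ms between polls):

#include <stdio.h>

enum f_state { F_STARTED, F_TX_STOPPED };

static enum f_state fake_state(int poll)
{
        return poll < 3 ? F_TX_STOPPED : F_STARTED;     /* completes late */
}

static int wait_tx_stopped(void)
{
        int tout = 50, poll = 0;        /* illustrative ~5s budget */

        while (tout--) {
                if (fake_state(poll++) != F_TX_STOPPED)
                        return 0;
                /* msleep(100) here in the driver */
        }
        return -1;                      /* timed out: state is stuck */
}

int main(void)
{
        printf("wait rc = %d\n", wait_tx_stopped());
        return 0;
}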
*/ if (load_status || @@ -9480,14 +9556,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) return; } - /* if stop on error is defined no recovery flows should be executed */ + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" - "you will need to reboot when done\n"); - goto sp_rtnl_not_reset; + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; #endif - - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -9502,6 +9576,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -9540,6 +9620,13 @@ sp_rtnl_not_reset: "sending set mcast vf pf channel message from rtnl sp-task\n"); bnx2x_vfpf_set_mcast(bp->dev); } + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state)){ + if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { + bnx2x_tx_disable(bp); + BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n"); + } + } if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, &bp->sp_rtnl_state)) { @@ -9647,7 +9734,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR(bp, vals->bmac_addr, wb_data[0]); REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); - } BNX2X_DEV_INFO("Disable emac Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; @@ -9681,7 +9767,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, if (mac_stopped) msleep(20); - } #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) @@ -9780,6 +9865,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) return rc; } +bool bnx2x_port_after_undi(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *entry; + bool val; + + down(&bnx2x_prev_sem); + + entry = bnx2x_prev_path_get_entry(bp); + val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); + + up(&bnx2x_prev_sem); + + return val; +} + static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) { struct bnx2x_prev_path_list *tmp_list; @@ -9839,7 +9939,6 @@ static int bnx2x_do_flr(struct bnx2x *bp) u16 status; struct pci_dev *dev = bp->pdev; - if (CHIP_IS_E1x(bp)) { BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); return -EINVAL; @@ -9986,7 +10085,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) if (!timer_count) BNX2X_ERR("Failed to empty BRB, hope for the best\n"); - } /* No packets are in the pipeline, path is ready for reset */ @@ -10036,7 +10134,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp) { int time_counter = 10; u32 rc, fw, hw_lock_reg, hw_lock_val; - struct bnx2x_prev_path_list *prev_list; BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); /* clear hw from errors which may have resulted from an interrupted @@ -10049,7 +10146,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : (MISC_REG_DRIVER_CONTROL_7 + 
(BP_FUNC(bp) - 6) * 8); - hw_lock_val = (REG_RD(bp, hw_lock_reg)); + hw_lock_val = REG_RD(bp, hw_lock_reg); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); @@ -10064,7 +10161,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { BNX2X_DEV_INFO("Release previously held alr\n"); - REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); + bnx2x_release_alr(bp); } do { @@ -10093,7 +10190,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) break; } - /* non-common reply from MCP night require looping */ + /* non-common reply from MCP might require looping */ rc = bnx2x_prev_unload_uncommon(bp); if (rc != BNX2X_PREV_WAIT_NEEDED) break; @@ -10107,8 +10204,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) } /* Mark function if its port was used to boot from SAN */ - prev_list = bnx2x_prev_path_get_entry(bp); - if (prev_list && (prev_list->undi & (1 << BP_PORT(bp)))) + if (bnx2x_port_after_undi(bp)) bp->link_params.feature_config_flags |= FEATURE_CONFIG_BOOT_FROM_SAN; @@ -10192,8 +10288,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_init_shmem(bp); - - bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); @@ -10455,6 +10549,9 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], @@ -10765,7 +10862,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp) */ if (!bp->cnic_eth_dev.max_iscsi_conn) bp->flags |= no_flags; - } static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) @@ -10782,12 +10878,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) bp->cnic_eth_dev.fcoe_wwn_node_name_lo = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); } + +static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) +{ + u8 count = 0; + + if (IS_MF(bp)) { + u8 fid; + + /* iterate over absolute function ids for this path: */ + for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { + if (IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, + func_mf_config[fid].config); + + if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && + ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_FCOE)) + count++; + } else { + u32 cfg = MF_CFG_RD(bp, + func_ext_config[fid]. + func_cfg); + + if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && + (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) + count++; + } + } + } else { /* SF */ + int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; + + for (port = 0; port < port_cnt; port++) { + u32 lic = SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn) ^ + FW_ENCODE_32BIT_PATTERN; + if (lic) + count++; + } + } + + return count; +} + static void bnx2x_get_fcoe_info(struct bnx2x *bp) { int port = BP_PORT(bp); int func = BP_ABS_FUNC(bp); u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_fcoe_conn); + u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); if (!CNIC_SUPPORT(bp)) { bp->flags |= NO_FCOE_FLAG; @@ -10801,9 +10941,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) /* Calculate the number of maximum allowed FCoE tasks */ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; - if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp)) - bp->cnic_eth_dev.max_fcoe_exchanges /= - MAX_FCOE_FUNCS_PER_ENGINE; + + /* check if FCoE resources must be shared between different functions */ + if (num_fcoe_func) + bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; /* Read the WWN: */ if (!IS_MF(bp)) { @@ -11031,7 +11172,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else { bp->common.int_block = INT_BLOCK_IGU; - /* do not allow device reset during IGU info preocessing */ + /* do not allow device reset during IGU info processing */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); @@ -11110,7 +11251,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) E1H_FUNC_MAX * sizeof(struct drv_func_mb); /* * get mf configuration: - * 1. existence of MF configuration + * 1. Existence of MF configuration * 2. MAC address must be legal (check only upper bytes) * for Switch-Independent mode; * OVLAN must be legal for Switch-Dependent mode @@ -11384,7 +11525,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); - INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); @@ -11393,7 +11533,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (rc) return rc; } else { - random_ether_addr(bp->dev->dev_addr); + eth_zero_addr(bp->dev->dev_addr); } bnx2x_set_modes_bitmap(bp); @@ -11417,7 +11557,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_prev_unload(bp); } - if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -11489,7 +11628,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* We need at least one default status block for slow-path events, * second status block for the L2 queue, and a third status block for - * CNIC if supproted. + * CNIC if supported. 
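bnx2x_shared_fcoe_funcs() above decodes the per-port license words (stored XORed with FW_ENCODE_32BIT_PATTERN) to count FCoE-capable functions, and bnx2x_get_fcoe_info() then divides the engine-wide exchange budget by that count. A small model of the arithmetic (the pattern value and budget below are invented for the example):

#include <stdio.h>
#include <stdint.h>

#define FW_PATTERN 0x1e1e1e1eu  /* invented stand-in for FW_ENCODE_32BIT_PATTERN */

int main(void)
{
        /* sample per-port license words: shmem stores them XORed with
         * the pattern, so the pattern itself decodes to 0 (no license) */
        const uint32_t lic[2] = { FW_PATTERN, FW_PATTERN ^ 64 };
        unsigned int count = 0, max_exchanges = 4096;   /* invented budget */
        int port;

        for (port = 0; port < 2; port++)
                if (lic[port] ^ FW_PATTERN)     /* non-zero => FCoE licensed */
                        count++;

        if (count)                      /* share the engine-wide budget */
                max_exchanges /= count;

        printf("%u FCoE function(s) -> %u exchanges each\n",
               count, max_exchanges);
        return 0;
}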
*/ if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; @@ -11497,10 +11636,11 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); + bp->dump_preset_idx = 1; + return rc; } - /**************************************************************************** * General service functions ****************************************************************************/ @@ -11585,9 +11725,6 @@ static int bnx2x_close(struct net_device *dev) /* Unload the driver, release IRQs */ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); - /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); - return 0; } @@ -11852,6 +11989,10 @@ static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + /* query the bulletin board for mac address configured by the PF */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; @@ -11878,12 +12019,16 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_setup_tc = bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, - .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif + +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_ll_poll = bnx2x_low_latency_recv, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) @@ -11959,7 +12104,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } if (IS_PF(bp)) { - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&bp->pdev->dev, "Cannot find power management capability, aborting\n"); @@ -12008,8 +12153,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); - bnx2x_set_power_state(bp, PCI_D0); - /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); @@ -12094,15 +12237,26 @@ err_out: return rc; } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) +static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, + enum bnx2x_pci_bus_speed *speed) { - u32 val = 0; + u32 link_speed, val = 0; pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - /* return value of 1=2.5GHz 2=5GHz */ - *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + + switch (link_speed) { + case 3: + *speed = BNX2X_PCI_LINK_SPEED_8000; + break; + case 2: + *speed = BNX2X_PCI_LINK_SPEED_5000; + break; + default: + *speed = BNX2X_PCI_LINK_SPEED_2500; + } } static int bnx2x_check_firmware(struct bnx2x *bp) @@ -12327,7 +12481,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp) bp->firmware = NULL; } - static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { .init_hw_cmn_chip = bnx2x_init_hw_common_chip, .init_hw_cmn = bnx2x_init_hw_common, @@ -12465,7 +12618,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct bnx2x *bp; - int pcie_width, pcie_speed; + int pcie_width; + enum bnx2x_pci_bus_speed pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count, doorbell_size; int max_cos_est; @@ -12605,7 +12759,6 @@ static int bnx2x_init_one(struct pci_dev 
*pdev, } BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); - if (!NO_FCOE(bp)) { /* Add storage MAC address */ rtnl_lock(); @@ -12617,15 +12770,15 @@ static int bnx2x_init_one(struct pci_dev *pdev, BNX2X_DEV_INFO("got pcie width %d and speed %d\n", pcie_width, pcie_speed); - BNX2X_DEV_INFO( - "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, - (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), - pcie_width, - ((!CHIP_IS_E2(bp) && pcie_speed == 2) || - (CHIP_IS_E2(bp) && pcie_speed == 1)) ? - "5GHz (Gen2)" : "2.5GHz", - dev->base_addr, bp->pdev->irq, dev->dev_addr); + BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : + "Unknown", + dev->base_addr, bp->pdev->irq, dev->dev_addr); return 0; @@ -12647,17 +12800,11 @@ init_one_exit: return rc; } -static void bnx2x_remove_one(struct pci_dev *pdev) +static void __bnx2x_remove(struct pci_dev *pdev, + struct net_device *dev, + struct bnx2x *bp, + bool remove_netdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return; - } - bp = netdev_priv(dev); - /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -12670,7 +12817,17 @@ static void bnx2x_remove_one(struct pci_dev *pdev) bnx2x_dcbnl_update_applist(bp, true); #endif - unregister_netdev(dev); + /* Close the interface - either directly or implicitly */ + if (remove_netdev) { + unregister_netdev(dev); + } else { + rtnl_lock(); + if (netif_running(dev)) + bnx2x_close(dev); + rtnl_unlock(); + } + + bnx2x_iov_remove_one(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) @@ -12686,12 +12843,16 @@ static void bnx2x_remove_one(struct pci_dev *pdev) /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); - bnx2x_iov_remove_one(bp); - /* send message via vfpf channel to release the resources of this vf */ if (IS_VF(bp)) bnx2x_vfpf_release(bp); + /* Assumes no further PCIe PM changes will occur */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } + if (bp->regview) iounmap(bp->regview); @@ -12706,7 +12867,8 @@ static void bnx2x_remove_one(struct pci_dev *pdev) } bnx2x_free_mem_bp(bp); - free_netdev(dev); + if (remove_netdev) + free_netdev(dev); if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); @@ -12715,6 +12877,20 @@ static void bnx2x_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +static void bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + + __bnx2x_remove(pdev, dev, bp, true); +} + static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; @@ -12747,19 +12923,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) return 0; } -static void bnx2x_eeh_recover(struct bnx2x *bp) -{ - u32 val; - - mutex_init(&bp->port.phy_mutex); - - - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & 
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERR("BAD MCP validity signature\n"); -} - /** * bnx2x_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -12828,6 +12991,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) if (netif_running(dev)) { BNX2X_ERR("IO slot reset --> driver unload\n"); + + /* MCP should have been reset; Need to wait for validity */ + bnx2x_init_shmem(bp); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; @@ -12849,7 +13016,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) bnx2x_prev_unload(bp); - /* We should have resetted the engine, so It's fair to + /* We should have reseted the engine, so It's fair to * assume the FW will no longer write to the bnx2x driver. */ bnx2x_squeeze_objects(bp); @@ -12886,8 +13053,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev) rtnl_lock(); - bnx2x_eeh_recover(bp); - bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; @@ -12905,6 +13070,29 @@ static const struct pci_error_handlers bnx2x_err_handler = { .resume = bnx2x_io_resume, }; +static void bnx2x_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + netif_device_detach(dev); + rtnl_unlock(); + + /* Don't remove the netdevice, as there are scenarios which will cause + * the kernel to hang, e.g., when trying to remove bnx2i while the + * rootfs is mounted from SAN. + */ + __bnx2x_remove(pdev, dev, bp, false); +} + static struct pci_driver bnx2x_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnx2x_pci_tbl, @@ -12916,6 +13104,7 @@ static struct pci_driver bnx2x_pci_driver = { #ifdef CONFIG_BNX2X_SRIOV .sriov_configure = bnx2x_sriov_configure, #endif + .shutdown = bnx2x_shutdown, }; static int __init bnx2x_init(void) @@ -12941,11 +13130,12 @@ static int __init bnx2x_init(void) static void __exit bnx2x_cleanup(void) { struct list_head *pos, *q; + pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); - /* Free globablly allocated resources */ + /* Free globally allocated resources */ list_for_each_safe(pos, q, &bnx2x_prev_list) { struct bnx2x_prev_path_list *tmp = list_entry(pos, struct bnx2x_prev_path_list, list); @@ -12968,7 +13158,7 @@ module_exit(bnx2x_cleanup); * @bp: driver handle * @set: set or clear the CAM entry * - * This function will wait until the ramdord completion returns. + * This function will wait until the ramrod completion returns. * Return 0 if success, -ENODEV if ramrod doesn't return. */ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) @@ -12996,7 +13186,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) BUG_ON(bp->cnic_spq_pending < count); bp->cnic_spq_pending -= count; - for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) & SPE_HDR_CONN_TYPE) >> @@ -13169,7 +13358,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_sp_post(bp, 0); } - /* Called with netif_addr_lock_bh() taken. * Sets an rx_mode config for an iSCSI ETH client. * Doesn't block. 
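bnx2x_cleanup() above walks bnx2x_prev_list with list_for_each_safe() because each entry is freed during the walk; the _safe variant caches the next pointer before the current node goes away. The idiom in isolation (kernel-context sketch with a generic entry type):

struct prev_entry {
        struct list_head link;
        /* ... per-path data ... */
};

static void free_all(struct list_head *head)
{
        struct list_head *pos, *q;

        list_for_each_safe(pos, q, head) {      /* q caches pos->next */
                struct prev_entry *e =
                        list_entry(pos, struct prev_entry, link);

                list_del(pos);                  /* unlink before freeing */
                kfree(e);
        }
}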
@@ -13210,7 +13398,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) } } - static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) { struct bnx2x *bp = netdev_priv(dev); @@ -13398,7 +13585,6 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp) { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cid_ilt_lines(bp); cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; @@ -13434,7 +13620,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, BNX2X_ERR("CNIC-related load failed\n"); return rc; } - } bp->cnic_enabled = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index d22bc40091ec..8e627b886d7b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -35,6 +35,8 @@ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 /* [RW 5] Parity mask register #0 read/write */ #define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [R 5] Parity register #0 read */ +#define ATC_REG_ATC_PRTY_STS 0x1101cc /* [RC 5] Parity register #0 read clear */ #define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 /* [RW 19] Interrupt mask register #0 read/write */ @@ -2750,6 +2752,8 @@ #define PBF_REG_PBF_INT_STS 0x1401c8 /* [RW 20] Parity mask register #0 read/write */ #define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 /* [RC 20] Parity register #0 read clear */ #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc /* [RW 16] The Ethernet type value for L2 tag 0 */ @@ -4517,6 +4521,8 @@ #define TM_REG_TM_INT_STS 0x1640f0 /* [RW 7] Parity mask register #0 read/write */ #define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 /* [RC 7] Parity register #0 read clear */ #define TM_REG_TM_PRTY_STS_CLR 0x164104 /* [RW 8] The event id for aggregated interrupt 0 */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32a9609cc98b..8f03c984550f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -35,9 +35,9 @@ /** * bnx2x_exe_queue_init - init the Exe Queue object * - * @o: poiter to the object + * @o: pointer to the object * @exe_len: length - * @owner: poiter to the owner + * @owner: pointer to the owner * @validate: validate function pointer * @optimize: optimize function pointer * @exec: execute function pointer @@ -142,7 +142,6 @@ free_and_exit: spin_unlock_bh(&o->lock); return rc; - } static inline void __bnx2x_exe_queue_reset_pending( @@ -163,13 +162,11 @@ static inline void __bnx2x_exe_queue_reset_pending( static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o) { - spin_lock_bh(&o->lock); __bnx2x_exe_queue_reset_pending(bp, o); spin_unlock_bh(&o->lock); - } /** @@ -179,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicy is ensured using the exe_queue->lock). + * (Atomicity is ensured using the exe_queue->lock). 
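bnx2x_exe_queue_step() above batches pending commands into a chunk whose summed cmd_len stays within exe_chunk_len before handing it to the execute() callback. A toy model of that chunking decision:

#include <stdio.h>

struct cmd { int len; };

/* toy chunker: how many leading commands fit one chunk, mirroring the
 * cur_len + elem->cmd_len <= exe_chunk_len test above */
static int chunk(const struct cmd *q, int n, int chunk_len)
{
        int i, cur_len = 0;

        for (i = 0; i < n; i++) {
                if (cur_len + q[i].len > chunk_len)
                        break;
                cur_len += q[i].len;
        }
        return i;
}

int main(void)
{
        const struct cmd q[] = { {2}, {3}, {4}, {1} };

        /* with a budget of 6, only the first two commands go out */
        printf("%d commands in first chunk\n", chunk(q, 4, 6));
        return 0;
}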
*/ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -192,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, spin_lock_bh(&o->lock); - /* - * Next step should not be performed until the current is finished, + /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW * which also implies there won't be any completion to clear the @@ -209,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } } - /* - * Run through the pending commands list and create a next + /* Run through the pending commands list and create a next * execution chunk. */ while (!list_empty(&o->exe_queue)) { @@ -220,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, if (cur_len + elem->cmd_len <= o->exe_chunk_len) { cur_len += elem->cmd_len; - /* - * Prevent from both lists being empty when moving an + /* Prevent from both lists being empty when moving an * element. This will allow the call of * bnx2x_exe_queue_empty() without locking. */ @@ -241,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) - /* - * In case of an error return the commands back to the queue - * and reset the pending_comp. + /* In case of an error return the commands back to the queue + * and reset the pending_comp. */ list_splice_init(&o->pending_comp, &o->exe_queue); else if (!rc) - /* - * If zero is returned, means there are no outstanding pending + /* If zero is returned, means there are no outstanding pending * completions and we may dismiss the pending list. */ __bnx2x_exe_queue_reset_pending(bp, o); @@ -308,7 +300,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, /* can take a while if any port is running */ int cnt = 5000; - if (CHIP_REV_IS_EMUL(bp)) cnt *= 20; @@ -456,7 +447,6 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", counter, next); next += stride + size; - } } return counter * ETH_ALEN; @@ -518,7 +508,6 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, return 0; } - /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -609,7 +598,6 @@ static bool bnx2x_check_move_always_err( return false; } - static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) { struct bnx2x_raw_obj *raw = &o->raw; @@ -626,7 +614,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) return rx_tx_flag; } - void bnx2x_set_mac_in_nig(struct bnx2x *bp, bool add, unsigned char *dev_addr, int index) { @@ -693,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, * * @cid: connection id * @type: BNX2X_FILTER_XXX_PENDING - * @hdr: poiter to header to setup + * @hdr: pointer to header to setup * @rule_cnt: * * currently we always configure one rule and echo field to contain a CID and an @@ -707,7 +694,6 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, hdr->rule_cnt = (u8)rule_cnt; } - /* hw_config() callbacks */ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, @@ -723,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = 
elem->cmd_data.vlan_mac.u.mac.mac; - /* - * Set LLH CAM entry: currently only iSCSI and ETH macs are + /* Set LLH CAM entry: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. * @@ -879,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -960,7 +944,6 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - /* Reset the ramrod data buffer for the first rule */ if (rule_idx == 0) memset(data, 0, sizeof(*data)); @@ -969,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, &rule_entry->pair.header); - /* Set VLAN and MAC themselvs */ + /* Set VLAN and MAC themselves */ rule_entry->pair.vlan = cpu_to_le16(vlan); bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, &rule_entry->pair.mac_mid, @@ -1021,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -1046,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * * @bp: device handle * @p: command parameters - * @ppos: pointer to the cooky + * @ppos: pointer to the cookie * * reconfigure next MAC/VLAN/VLAN-MAC element from the * previously configured elements list. @@ -1054,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken * into an account * - * pointer to the cooky - that should be given back in the next call to make + * pointer to the cookie - that should be given back in the next call to make * function handle the next element. If *ppos is set to NULL it will restart the * iterator. If returned *ppos == NULL this means that the last element has been * handled. @@ -1102,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp, return bnx2x_config_vlan_mac(bp, p); } -/* - * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a +/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a * pointer to an element with a specific criteria and NULL if such an element * hasn't been found. */ @@ -1187,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return rc; } - /* - * Check if there is a pending ADD command for this + /* Check if there is a pending ADD command for this * MAC/VLAN/VLAN-MAC. Return an error if there is. */ if (exeq->get(exeq, elem)) { @@ -1196,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return -EEXIST; } - /* - * TODO: Check the pending MOVE from other objects where this + /* TODO: Check the pending MOVE from other objects where this * object is a destination object. 
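bnx2x_validate_vlan_mac_add() above rejects an ADD when the registry already holds the address or when an identical ADD is still pending in the exe queue. Both checks in miniature (fixed-size tables stand in for the registry and the queue):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static unsigned char registry[4][6];    /* committed MACs */
static unsigned char pending[4][6];     /* queued, not yet completed ADDs */
static int n_reg, n_pend;

static int found(unsigned char t[][6], int n, const unsigned char *mac)
{
        int i;

        for (i = 0; i < n; i++)
                if (!memcmp(t[i], mac, 6))
                        return 1;
        return 0;
}

/* mirrors the validate flow: registry first, then the pending queue */
static int validate_add(const unsigned char *mac)
{
        if (found(registry, n_reg, mac))
                return -EEXIST;         /* check_add() failed */
        if (found(pending, n_pend, mac))
                return -EEXIST;         /* a pending ADD already exists */
        memcpy(pending[n_pend++], mac, 6);
        return 0;
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };

        printf("first add:  %d\n", validate_add(mac));
        printf("second add: %d\n", validate_add(mac));  /* -EEXIST */
        return 0;
}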
*/ @@ -1240,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, return -EEXIST; } - /* - * Check if there are pending DEL or MOVE commands for this + /* Check if there are pending DEL or MOVE commands for this * MAC/VLAN/VLAN-MAC. Return an error if so. */ memcpy(&query_elem, elem, sizeof(query_elem)); @@ -1292,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; - /* - * Check if we can perform this operation based on the current registry + /* Check if we can perform this operation based on the current registry * state. */ if (!src_o->check_move(bp, src_o, dest_o, @@ -1302,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, return -EINVAL; } - /* - * Check if there is an already pending DEL or MOVE command for the + /* Check if there is an already pending DEL or MOVE command for the * source object or ADD command for a destination object. Return an * error if so. */ @@ -1392,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp, } /** - * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. * * @bp: device handle * @o: bnx2x_vlan_mac_obj @@ -1550,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem( /* Get a new CAM offset */ if (!o->get_cam_offset(o, ®_elem->cam_offset)) { - /* - * This shell never happen, because we have checked the - * CAM availiability in the 'validate'. + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. */ WARN_ON(1); kfree(reg_elem); @@ -1599,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_registry_elem *reg_elem; enum bnx2x_vlan_mac_cmd cmd; - /* - * If DRIVER_ONLY execution is requested, cleanup a registry + /* If DRIVER_ONLY execution is requested, cleanup a registry * and exit. Otherwise send a ramrod to FW. */ if (!drv_only) { @@ -1609,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, /* Set pending */ r->set_pending(r); - /* Fill tha ramrod data */ + /* Fill the ramrod data */ list_for_each_entry(elem, exe_chunk, link) { cmd = elem->cmd_data.vlan_mac.cmd; - /* - * We will add to the target object in MOVE command, so + /* We will add to the target object in MOVE command, so * change the object for a CAM search. */ if (cmd == BNX2X_VLAN_MAC_MOVE) @@ -1646,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, idx++; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
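The "no explicit memory barrier" comments above all lean on the same fact: bnx2x_sp_post() orders the SPQ element write before the producer update, so one barrier inside the post routine covers every caller (and, as the comment notes, it must be a full barrier there since a memory read is also involved). Stripped to its essentials as a kernel-context sketch with generic ring types, not the driver's own:

struct elem { u32 data[4]; };

struct ring {
        struct elem *slots;
        u32 prod, mask;
        void __iomem *doorbell;
};

static void ring_post(struct ring *r, const struct elem *e)
{
        r->slots[r->prod & r->mask] = *e;       /* 1: fill the element  */
        mb();                                   /* 2: order 1 before 3  */
        writel(++r->prod, r->doorbell);         /* 3: publish to the HW */
}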
*/ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, @@ -1766,8 +1738,7 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * If nothing will be executed further in this iteration we want to + /* If nothing will be executed further in this iteration we want to * return PENDING if there are pending commands */ if (!bnx2x_exe_queue_empty(&o->exe_queue)) @@ -1786,13 +1757,11 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set * then user want to wait until the last command is done. */ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - /* - * Wait maximum for the current exe_queue length iterations plus + /* Wait maximum for the current exe_queue length iterations plus * one (for the current pending command). */ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; @@ -1818,8 +1787,6 @@ int bnx2x_config_vlan_mac( return rc; } - - /** * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec * @@ -1829,7 +1796,7 @@ int bnx2x_config_vlan_mac( * @ramrod_flags: execution flags to be used for this deletion * * if the last operation has completed successfully and there are no - * moreelements left, positive value if the last operation has completed + * more elements left, positive value if the last operation has completed * successfully and there are more previously configured elements, negative * value is current operation has failed. */ @@ -1870,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, p.ramrod_flags = *ramrod_flags; p.user_req.cmd = BNX2X_VLAN_MAC_DEL; - /* - * Add all but the last VLAN-MAC to the execution queue without actually + /* Add all but the last VLAN-MAC to the execution queue without actually * execution anything. */ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); @@ -1934,7 +1900,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, state, pstate, type); } - void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, u8 cl_id, u32 cid, u8 func_id, void *rdata, @@ -2048,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, /* CAM pool handling */ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* - * CAM offset is relevant for 57710 and 57711 chips only which have a + /* CAM offset is relevant for 57710 and 57711 chips only which have a * single CAM for both MACs and VLAN-MAC pairs. So the offset * will be taken from MACs' pool object only. */ @@ -2092,7 +2056,6 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan_mac); } - } /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ @@ -2117,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters = (struct tstorm_eth_mac_filter_config *)p->rdata; - /* initial seeting is drop-all */ + /* initial setting is drop-all */ u8 drop_all_ucast = 1, drop_all_mcast = 1; u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; u8 unmatched_unicast = 0; - /* In e1x there we only take into account rx acceot flag since tx switching + /* In e1x there we only take into account rx accept flag since tx switching * isn't enabled. 
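/* The e1x rx-mode path coming up starts from a drop-all baseline and
 * then relaxes it bit by bit per accept flag. A compact userspace
 * sketch of that pattern (BNX2X_ACCEPT_UNICAST is quoted from the
 * patch; the other flag names and the struct, a stand-in for
 * tstorm_eth_mac_filter_config, are assumptions):
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	BNX2X_ACCEPT_UNICAST,
	BNX2X_ACCEPT_ALL_UNICAST,
	BNX2X_ACCEPT_BROADCAST,
};

struct mac_filter_cfg {
	bool drop_all_ucast;
	bool accp_all_ucast;
	bool accp_all_bcast;
};

static void fill_rx_mode(unsigned long accept, struct mac_filter_cfg *cfg)
{
	/* initial setting is drop-all */
	cfg->drop_all_ucast = true;
	cfg->accp_all_ucast = false;
	cfg->accp_all_bcast = false;

	if (accept & (1UL << BNX2X_ACCEPT_UNICAST))
		cfg->drop_all_ucast = false;	/* accept matched ucast */

	if (accept & (1UL << BNX2X_ACCEPT_ALL_UNICAST)) {
		cfg->drop_all_ucast = false;
		cfg->accp_all_ucast = true;	/* promiscuous unicast */
	}

	if (accept & (1UL << BNX2X_ACCEPT_BROADCAST))
		cfg->accp_all_bcast = true;
}

int main(void)
{
	struct mac_filter_cfg cfg;

	fill_rx_mode(1UL << BNX2X_ACCEPT_UNICAST, &cfg);
	printf("drop_all_ucast=%d\n", cfg.drop_all_ucast);
	return 0;
}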
*/ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) /* accept matched ucast */ @@ -2245,7 +2208,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, } cmd->state = cpu_to_le16(state); - } static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, @@ -2286,9 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, false); } - - /* - * If FCoE Queue configuration has been requested configure the Rx and + /* If FCoE Queue configuration has been requested configure the Rx and * internal switching modes for this queue in separate rules. * * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: @@ -2324,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, } } - /* - * Set the ramrod header (most importantly - number of rules to + /* Set the ramrod header (most importantly - number of rules to * configure). */ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); @@ -2334,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2476,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); - /* Push the MACs of the current command into the pendig command + /* Push the MACs of the current command into the pending command * MACs list: FIFO */ list_for_each_entry(pos, &p->mcast_list, link) { @@ -2909,7 +2867,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* Increase the total number of MACs pending to be configured */ @@ -3034,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, if (!o->total_pending_num) bnx2x_mcast_refresh_registry_e2(bp, o); - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3121,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, } } -/* On 57711 we write the multicast MACs' aproximate match +/* On 57711 we write the multicast MACs' approximate match * table by directly into the TSTORM's internal RAM. So we don't * really need to handle any tricks to make it work. 
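/* On 57711 the multicast "approximate match" table mentioned above is
 * a bitmap the driver writes straight into storm RAM. The exact hash
 * the firmware expects is chip-specific and not shown in this patch;
 * the sketch below only illustrates the usual bookkeeping -- hash the
 * MAC, pick a bin, set that bit -- assuming a CRC32-based 256-bin
 * filter (an assumption for illustration only):
 */
#include <stdio.h>

static unsigned int crc32_le(const unsigned char *p, int len)
{
	unsigned int crc = ~0u;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return ~crc;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int table[8] = { 0 };	/* 8 x 32 bits = 256 bins */
	int bin = (crc32_le(mac, 6) >> 24) & 0xff;

	table[bin / 32] |= 1u << (bin % 32);
	printf("mac hashes to bin %d\n", bin);
	return 0;
}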
*/ @@ -3223,7 +3178,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* We want to ensure that commands are executed one by one for 57710. @@ -3245,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, /* If current command hasn't been handled yet and we are * here means that it's meant to be dropped and we have to - * update the number of outstandling MACs accordingly. + * update the number of outstanding MACs accordingly. */ if (p->mcast_list_len) o->total_pending_num -= o->max_cmd_len; @@ -3342,7 +3296,6 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( return -1; } - static inline int bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { @@ -3352,7 +3305,6 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = 0; - /* If nothing to be done - return */ if (list_empty(&o->pending_cmds_head)) return 0; @@ -3523,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, if (rc) return rc; - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3550,7 +3500,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, /* Ramrod completion is pending */ return 1; } - } static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) @@ -3848,7 +3797,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, return true; } - static bool bnx2x_credit_pool_get_entry( struct bnx2x_credit_pool_obj *o, int *offset) @@ -3999,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equaly divided between all active functions * on the PATH. */ if ((func_num > 0)) { @@ -4009,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, else cam_sz = BNX2X_CAM_SIZE_EMUL; - /* - * No need for CAM entries handling for 57712 and + /* No need for CAM entries handling for 57712 and * newer. */ bnx2x_init_credit_pool(p, -1, cam_sz); @@ -4018,7 +3964,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* this should never happen! Block MAC operations. */ bnx2x_init_credit_pool(p, 0, 0); } - } } @@ -4028,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, u8 func_num) { if (CHIP_IS_E1x(bp)) { - /* - * There is no VLAN credit in HW on 57710 and 57711 only + /* There is no VLAN credit in HW on 57710 and 57711 only * MAC / MAC-VLAN can be set */ bnx2x_init_credit_pool(p, 0, -1); } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH. 
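/* "Equally divided" above is plain integer division of the CAM size by
 * the number of active functions on the path, with zero functions
 * blocking the pool as the fall-through branch does. Sketch (the 272
 * figure is MAX_MAC_CREDIT_E2, quoted from the bnx2x_sp.h comment
 * later in this patch):
 */
#include <stdio.h>

#define MAX_MAC_CREDIT_E2 272

static int cam_credit_per_func(int cam_sz, int func_num)
{
	return func_num > 0 ? cam_sz / func_num : 0;	/* 0 blocks MAC ops */
}

int main(void)
{
	int n;

	for (n = 1; n <= 8; n *= 2)
		printf("%d function(s): %d CAM entries each\n",
		       n, cam_credit_per_func(MAX_MAC_CREDIT_E2, n));
	return 0;
}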
*/ if (func_num > 0) { @@ -4051,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, /** * bnx2x_debug_print_ind_table - prints the indirection table configuration. * - * @bp: driver hanlde + * @bp: driver handle * @p: pointer to rss configuration * * Prints it when NETIF_MSG_IFUP debug level is configured. @@ -4164,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4215,7 +4157,6 @@ int bnx2x_config_rss(struct bnx2x *bp, return rc; } - void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, u8 cl_id, u32 cid, u8 func_id, u8 engine_id, @@ -4288,7 +4229,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp, return !!test_bit(pending_bit, pending); } - static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, struct bnx2x_queue_state_params *params) { @@ -4337,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, } if (o->next_tx_only >= o->max_cos) - /* >= becuase tx only must always be smaller than cos since the + /* >= because tx only must always be smaller than cos since the * primary connection supports COS 0 */ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", @@ -4403,7 +4343,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, gen_data->mtu = cpu_to_le16(params->mtu); gen_data->func_id = o->func_id; - gen_data->cos = params->cos; gen_data->traffic_type = @@ -4530,7 +4469,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, cpu_to_le16(params->silent_removal_value); rx_data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); - } /* initialize the general, tx and rx parts of a queue object */ @@ -4652,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
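/* The RSS object above programs an indirection table: each hash bucket
 * maps to a queue id, typically spread round-robin across the active
 * queues, which is also what the debug print walks. A sketch (the
 * 128-entry size is an assumption for illustration; the real size is a
 * chip constant):
 */
#include <stdio.h>

#define IND_TABLE_SIZE 128	/* assumed; chip-specific in the driver */

static void fill_ind_table(unsigned char *tbl, int num_queues)
{
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		tbl[i] = (unsigned char)(i % num_queues);
}

int main(void)
{
	unsigned char tbl[IND_TABLE_SIZE];
	int i;

	fill_ind_table(tbl, 4);
	for (i = 0; i < 8; i++)	/* show the first few entries */
		printf("bucket %d -> queue %d\n", i, tbl[i]);
	return 0;
}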
*/ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4681,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4706,7 +4642,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, ¶ms->params.tx_only; u8 cid_index = tx_only_params->cid_index; - if (cid_index >= o->max_cos) { BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", o->cl_id, cid_index); @@ -4727,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], @@ -4761,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, ¶ms->update_flags); - /* Outer VLAN sripping */ + /* Outer VLAN stripping */ data->outer_vlan_removal_enable_flg = test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); data->outer_vlan_removal_change_flg = @@ -4816,19 +4750,17 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, return -EINVAL; } - /* Clear the ramrod data */ memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, @@ -5038,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, ¶ms->params.update; u8 next_tx_only = o->num_tx_only; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. 
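/* The update ramrod filled in above carries value/change-flag pairs
 * (e.g. outer_vlan_removal_enable_flg plus its _change_flg): the
 * firmware applies a field only when its change flag is set, so
 * untouched settings survive the update. A sketch of that pattern
 * (field names invented for illustration):
 */
#include <stdbool.h>
#include <stdio.h>

struct update_data {
	bool vlan_strip;	/* desired value */
	bool vlan_strip_chng;	/* apply it? */
};

static void apply_update(bool *cur, const struct update_data *d)
{
	if (d->vlan_strip_chng)
		*cur = d->vlan_strip;	/* left untouched otherwise */
}

int main(void)
{
	bool strip = false;
	struct update_data d = { .vlan_strip = true, .vlan_strip_chng = true };

	apply_update(&strip, &d);
	printf("vlan stripping now %s\n", strip ? "on" : "off");
	return 0;
}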
*/ if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { @@ -5047,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_Q_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) { @@ -5257,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, if (o->pending) return BNX2X_F_STATE_MAX; - /* - * unsure the order of reading of o->pending and o->state + /* unsure the order of reading of o->pending and o->state * o->pending should be read first */ rmb(); @@ -5356,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; enum bnx2x_func_cmd cmd = params->cmd; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { @@ -5365,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_F_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) @@ -5539,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, goto init_err; } - /* Handle the beginning of COMMON_XXX pases separatelly... */ + /* Handle the beginning of COMMON_XXX pases separately... */ switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: rc = bnx2x_func_init_cmn_chip(bp, drv); @@ -5573,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, init_err: drv->gunzip_end(bp); - /* In case of success, complete the comand immediatelly: no ramrods + /* In case of success, complete the command immediately: no ramrods * have been sent. */ if (!rc) @@ -5598,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp, } /** - * bnx2x_func_reset_port - reser HW at port stage + * bnx2x_func_reset_port - reset HW at port stage * * @bp: device handle * @drv: @@ -5620,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp, } /** - * bnx2x_func_reset_cmn - reser HW at common stage + * bnx2x_func_reset_cmn - reset HW at common stage * * @bp: device handle * @drv: @@ -5636,7 +5563,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, drv->reset_hw_cmn(bp); } - static inline int bnx2x_func_hw_reset(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5663,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp, break; } - /* Complete the comand immediatelly: no ramrods have been sent. */ + /* Complete the command immediately: no ramrods have been sent. */ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 43c00bc84a08..798dfe996733 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -34,8 +34,7 @@ enum { RAMROD_RESTORE, /* Execute the next command now */ RAMROD_EXEC, - /* - * Don't add a new command and continue execution of posponed + /* Don't add a new command and continue execution of postponed * commands. If not set a new command will be added to the * pending commands list. 
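/* Both chk_transition helpers above implement the same "one pending"
 * rule: refuse a new command while a previous one is still in flight,
 * and reset next_state to the MAX sentinel once it completes. A
 * stripped-down sketch (states and names invented for illustration):
 */
#include <stdio.h>

enum state { ST_RESET, ST_ACTIVE, ST_STOPPED, ST_MAX };

struct sm {
	enum state state, next_state;
	unsigned long pending;	/* any set bit = command in flight */
};

static int chk_transition(struct sm *o, enum state target, int cmd_bit)
{
	if (o->pending)
		return -1;	/* -EBUSY: middle of the previous transition */
	o->next_state = target;
	o->pending |= 1UL << cmd_bit;
	return 0;
}

static void complete_cmd(struct sm *o, int cmd_bit)
{
	o->pending &= ~(1UL << cmd_bit);
	o->state = o->next_state;
	o->next_state = ST_MAX;	/* as the driver does on completion */
}

int main(void)
{
	struct sm o = { ST_RESET, ST_MAX, 0 };

	if (!chk_transition(&o, ST_ACTIVE, 0))
		complete_cmd(&o, 0);
	printf("state=%d pending=%lu\n", o.state, o.pending);
	return 0;
}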
*/ @@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd { struct bnx2x_vlan_mac_data { /* Requested command: BNX2X_VLAN_MAC_XX */ enum bnx2x_vlan_mac_cmd cmd; - /* - * used to contain the data related vlan_mac_flags bits from + /* used to contain the data related vlan_mac_flags bits from * ramrod parameters. */ unsigned long vlan_mac_flags; @@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem * struct bnx2x_exeq_elem *elem); struct bnx2x_exe_queue_obj { - /* - * Commands pending for an execution. - */ + /* Commands pending for an execution. */ struct list_head exe_queue; - /* - * Commands pending for an completion. - */ + /* Commands pending for an completion. */ struct list_head pending_comp; spinlock_t lock; @@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj { }; /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ /* - * Element in the VLAN_MAC registry list having all currenty configured + * Element in the VLAN_MAC registry list having all currently configured * rules. */ struct bnx2x_vlan_mac_registry_elem { struct list_head link; - /* - * Used to store the cam offset used for the mac/vlan/vlan-mac. + /* Used to store the cam offset used for the mac/vlan/vlan-mac. * Relevant for 57710 and 57711 only. VLANs and MACs share the * same CAM for these chips. */ @@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj { * @param n number of elements to get * @param buf buffer preallocated by caller into which elements * will be copied. Note elements are 4-byte aligned - * so buffer size must be able to accomodate the + * so buffer size must be able to accommodate the * aligned elements. * * @return number of copied bytes @@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj { * @param bp * @param p Command parameters (RAMROD_COMP_WAIT bit in * ramrod_flags is only taken into an account) - * @param ppos a pointer to the cooky that should be given back in the + * @param ppos a pointer to the cookie that should be given back in the * next call to make function handle the next element. If * *ppos is set to NULL it will restart the iterator. * If returned *ppos == NULL this means that the last @@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj { struct bnx2x_vlan_mac_registry_elem **ppos); /** - * Should be called on a completion arival. + * Should be called on a completion arrival. * * @param bp * @param o @@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp, /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in +/* RX_MODE ramrod special flags: set in rx_mode_flags field in * a bnx2x_rx_mode_ramrod_params. */ enum { @@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params { unsigned long ramrod_flags; unsigned long rx_mode_flags; - /* - * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to * a tstorm_eth_mac_filter_config (e1x). */ void *rdata; @@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj { /* Maximum allowed credit. put() will check against it. */ int pool_sz; - /* - * Allocate a pool table statically. + /* Allocate a pool table statically. * - * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) * - * The set bit in the table will mean that the entry is available. + * The set bit in the table will mean that the entry is available. 
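/* The statically allocated pool vector declared just below is a
 * bitmap where a set bit marks a free entry: get() finds and clears
 * the first set bit, put() sets it back. A userspace sketch of that
 * bookkeeping (the round-up and helper names are mine;
 * __builtin_ffsll is the gcc find-first-set builtin):
 */
#include <stdio.h>

#define POOL_SZ 272				/* MAX_MAC_CREDIT_E2 */
#define POOL_VEC ((POOL_SZ + 63) / 64)		/* 64 bits per word */

static unsigned long long mirror[POOL_VEC];

static int pool_get_entry(int *offset)
{
	int i;

	for (i = 0; i < POOL_VEC; i++) {
		int bit = __builtin_ffsll(mirror[i]);	/* 1-based, 0 if none */

		if (bit) {
			mirror[i] &= ~(1ULL << (bit - 1));	/* claim it */
			*offset = i * 64 + bit - 1;
			return 0;
		}
	}
	return -1;	/* no free entries */
}

static void pool_put_entry(int offset)
{
	mirror[offset / 64] |= 1ULL << (offset % 64);
}

int main(void)
{
	int off;

	pool_put_entry(5);	/* mark entry 5 available */
	if (!pool_get_entry(&off))
		printf("got CAM offset %d\n", off);
	return 0;
}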
*/ #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; @@ -832,7 +823,7 @@ enum { BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; -/* Queue type options: queue type may be a compination of below. */ +/* Queue type options: queue type may be a combination of below. */ enum bnx2x_q_type { /** TODO: Consider moving both these flags into the init() * ramrod params. @@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj { u8 cl_id; u8 func_id; - /* - * number of traffic classes supported by queue. - * The primary connection of the queue suppotrs the first traffic - * class. Any further traffic class is suppoted by a tx-only + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only * connection. * * Therefore max_cos is also a number of valid entries in the cids @@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj { /* BNX2X_Q_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj { /* BNX2X_FUNC_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, * * @p: Command parameters * - * Return: 0 - if operation was successfull and there is no pending completions, + * Return: 0 - if operation was successful and there is no pending completions, * positive number - if there are pending completions, * negative - if there were errors */ @@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * Return: 0 is operation was successfull and there are no pending completions, + * Return: 0 is operation was successful and there are no pending completions, * negative if there were errors, positive if there are pending * completions. */ @@ -1377,7 +1367,6 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); - /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2ce7c7471367..95861efb5051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, */ /* internal vf enable - until vf is enabled internally all transactions - * are blocked. this routine should always be called last with pretend. + * are blocked. This routine should always be called last with pretend. 
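/* bnx2x_config_rx_mode() and bnx2x_config_mcast() above share a
 * return convention: zero means done, positive means a completion is
 * still pending, negative is an error. Callers typically handle it as
 * in this sketch (the stubs stand in for the real calls):
 */
#include <stdio.h>

static int config_mcast_stub(void)
{
	return 1;	/* pretend: ramrod posted, completion pending */
}

static void wait_for_completion_stub(void)
{
	/* the driver would poll or sleep here */
}

static int set_mcast_list(void)
{
	int rc = config_mcast_stub();

	if (rc < 0)
		return rc;			/* hard failure */
	if (rc > 0)
		wait_for_completion_stub();	/* still in flight */
	return 0;
}

int main(void)
{
	printf("set_mcast_list() = %d\n", set_mcast_list());
	return 0;
}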
*/ static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) { @@ -1459,21 +1459,16 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (!vf) - goto unknown_dev; + return false; dev = pci_get_bus_and_slot(vf->bus, vf->devfn); if (dev) return bnx2x_is_pcie_pending(dev); - -unknown_dev: return false; } int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) { - /* Wait 100ms */ - msleep(100); - /* Verify no pending pci transactions */ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) BNX2X_ERR("PCIE Transactions still pending\n"); @@ -1620,7 +1615,7 @@ next_vf_to_clean: i++) ; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i, + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, BNX2X_NR_VIRTFN(bp)); if (i < BNX2X_NR_VIRTFN(bp)) { @@ -1743,7 +1738,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); - /* set the number of VF alllowed doorbells to the full DQ range */ + /* set the number of VF allowed doorbells to the full DQ range */ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); /* set the VF doorbell threshold */ @@ -2176,6 +2171,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + /* let FLR complete ... */ + msleep(100); + /* initialize vf database */ for_each_vf(bp, vfid) { struct bnx2x_virtf *vf = BP_VF(bp, vfid); @@ -2403,7 +2401,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) /* extract vf and rxq index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); @@ -2461,7 +2459,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) { /* extract the vf from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); return bnx2x_vf_by_abs_fid(bp, abs_vfid); @@ -2480,7 +2478,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, if (vf) { /* extract queue index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); @@ -2705,7 +2703,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* static allocation: - * the global maximum number are fixed per VF. fail the request if + * the global maximum number are fixed per VF. Fail the request if * requested number exceed these globals */ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { @@ -2777,6 +2775,10 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) vf->abs_vfid, vf->state); return -EINVAL; } + + /* let FLR complete ... */ + msleep(100); + /* FLR cleanup epilogue */ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) return -EBUSY; @@ -2890,7 +2892,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp, return -ENOMEM; } -/* VF release can be called either: 1. the VF was acquired but +/* VF release can be called either: 1. 
The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ @@ -3024,7 +3026,6 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { - struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", @@ -3032,7 +3033,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) /* HW channel is only operational when PF is up */ if (bp->state != BNX2X_STATE_OPEN) { - BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down"); + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); return -EINVAL; } @@ -3086,6 +3087,11 @@ void bnx2x_disable_sriov(struct bnx2x *bp) static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, struct bnx2x_virtf *vf) { + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("vf ndo called though PF is down\n"); + return -EINVAL; + } + if (!IS_SRIOV(bp)) { BNX2X_ERR("vf ndo called though sriov is disabled\n"); return -EINVAL; @@ -3141,7 +3147,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* mac configured by ndo so its in bulletin board */ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else - /* funtion has not been loaded yet. Show mac as 0s */ + /* function has not been loaded yet. Show mac as 0s */ memset(&ivi->mac, 0, ETH_ALEN); /* vlan */ @@ -3149,7 +3155,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* vlan configured by ndo so its in bulletin board */ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); else - /* funtion has not been loaded yet. Show vlans as 0s */ + /* function has not been loaded yet. Show vlans as 0s */ memset(&ivi->vlan, 0, VLAN_HLEN); } @@ -3189,7 +3195,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return -EINVAL; } - /* update PF's copy of the VF's bulletin. will no longer accept mac + /* update PF's copy of the VF's bulletin. Will no longer accept mac * configuration requests from vf unless match this mac */ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; @@ -3358,8 +3364,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) return 0; } -/* crc is the first field in the bulletin board. compute the crc over the - * entire bulletin board excluding the crc field itself +/* crc is the first field in the bulletin board. Compute the crc over the + * entire bulletin board excluding the crc field itself. Use the length field + * as the Bulletin Board was posted by a PF with possibly a different version + * from the vf which will sample it. Therefore, the length is computed by the + * PF and the used blindly by the VF. */ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin) @@ -3389,7 +3398,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, &bulletin)) break; - BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", + BNX2X_ERR("bad crc on bulletin board. 
Contained %x computed %x\n", bulletin.crc, bnx2x_crc_vf_bulletin(bp, &bulletin)); } @@ -3417,6 +3426,20 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) return PFVF_BULLETIN_UPDATED; } +void bnx2x_timer_sriov(struct bnx2x *bp) +{ + bnx2x_sample_bulletin(bp); + + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } +} + void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { /* vf doorbells are embedded within the regview */ @@ -3452,7 +3475,7 @@ int bnx2x_open_epilog(struct bnx2x *bp) * register_netdevice which must have rtnl lock taken. As we are holding * the lock right now, that could only work if the probe would not take * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm failes) it can't assume + * contexts as well (such as passthrough to vm fails) it can't assume * the lock is being held for it. Using delayed work here allows the * probe code to simply take the lock (i.e. wait for it to be released * if it is being held). We only want to do this if the number of VFs @@ -3467,3 +3490,23 @@ int bnx2x_open_epilog(struct bnx2x *bp) return 0; } + +void bnx2x_iov_channel_down(struct bnx2x *bp) +{ + int vf_idx; + struct pf_vf_bulletin_content *bulletin; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vf_idx) { + /* locate this VFs bulletin board and update the channel down + * bit + */ + bulletin = BP_VF_BULLETIN(bp, vf_idx); + bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf_idx); + } +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d67ddc554c0f..d143a7cdbbbe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -197,7 +197,7 @@ struct bnx2x_virtf { u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ -#define VF_ACQUIRED 1 /* VF aquired, but not initalized */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ @@ -496,7 +496,7 @@ enum { else if ((next) == VFOP_VERIFY_PEND) \ BNX2X_ERR("expected pending\n"); \ else { \ - DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \ + DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ atomic_set(&vf->op_in_progress, 1); \ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ return; \ @@ -722,7 +722,6 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin); int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); - enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); /* VF side vfpf channel functions */ @@ -752,6 +751,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, } enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_timer_sriov(struct bnx2x *bp); void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp); @@ -762,6 +762,7 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp) } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +void bnx2x_iov_channel_down(struct bnx2x *bp); int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -809,6 +810,7 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp { return PFVF_BULLETIN_UNCHANGED; } +static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { @@ -818,6 +820,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 2ca3d94fcec2..98366abd02bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1002,7 +1002,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index d117f472816c..853824d258e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -40,7 +40,6 @@ struct nig_stats { u32 egress_mac_pkt1_hi; }; - enum bnx2x_stats_event { STATS_EVENT_PMF = 0, STATS_EVENT_LINK_UP, @@ -208,7 +207,6 @@ struct bnx2x_eth_stats { u32 eee_tx_lpi; }; - struct bnx2x_eth_q_stats { u32 total_unicast_bytes_received_hi; u32 total_unicast_bytes_received_lo; @@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old { u32 mac_discard; }; - /**************************************************************************** * Macros ****************************************************************************/ @@ -536,7 +533,6 @@ struct bnx2x_fw_port_stats_old { SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) - /* forward */ struct bnx2x; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 928b074d7d80..2088063151d6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -113,7 +113,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, 
u8 *done, dma_addr_t msg_mapping) { struct cstorm_vf_zone_data __iomem *zone_data = REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); - int tout = 600, interval = 100; /* wait for 60 seconds */ + int tout = 100, interval = 100; /* wait for 10 seconds */ if (*done) { BNX2X_ERR("done was non zero before message to pf was sent\n"); @@ -121,6 +121,16 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) return -EINVAL; } + /* if PF indicated channel is down avoid sending message. Return success + * so calling flow can continue + */ + bnx2x_sample_bulletin(bp); + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); + *done = PFVF_STATUS_SUCCESS; + return 0; + } + /* Write message address */ writel(U64_LO(msg_mapping), &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); @@ -233,7 +243,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) attempts++; - /* test whether the PF accepted our request. If not, humble the + /* test whether the PF accepted our request. If not, humble * the request and try again. */ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { @@ -333,7 +343,7 @@ int bnx2x_vfpf_release(struct bnx2x *bp) DP(BNX2X_MSG_SP, "vf released\n"); } else { /* PF reports error */ - BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", + BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n", resp->hdr.status); rc = -EAGAIN; goto out; @@ -787,7 +797,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); } -/* enable vf_pf mailbox (aka vf-pf-chanell) */ +/* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); @@ -844,7 +854,6 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, dmae.dst_addr_hi = vf_addr_hi; } dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE); /* issue the command and wait for completion */ return bnx2x_issue_dmae_with_comp(bp, &dmae); @@ -1072,7 +1081,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); - /* outer vlan removal is set according to the PF's multi fuction mode */ + /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); } @@ -1104,7 +1113,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; - /* reinit the VF operation context */ + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; init_p = &vf->op_params.qctor.qstate.params.init; @@ -1588,8 +1597,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ - BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", - mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. 
first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, + vf->state); for (i = 0; i < 20; i++) DP_CONT(BNX2X_MSG_IOV, "%x ", mbx->msg->req.tlv_buf_size.tlv_buffer[i]); @@ -1605,8 +1615,11 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } else { /* can't send a response since this VF is unknown to us - * just unlock the channel and be done with. + * just ack the FW to release the mailbox and unlock + * the channel. */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 41708faab575..f3ad174a3a63 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -331,7 +331,10 @@ struct pf_vf_bulletin_content { #define VLAN_VALID 1 /* when set, the vf should not access * the vfpf channel */ - +#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ u8 mac[ETH_ALEN]; u8 mac_padding[2]; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6b0dc131b20e..d78d4cf140ed 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, static int cnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct cnic_dev *dev; int new_dev = 0; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index e80bfb60c3ef..c2777712da99 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2197,7 +2197,7 @@ static const struct net_device_ops sbmac_netdev_ops = { static int sbmac_init(struct platform_device *pldev, long long base) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); int idx = pldev->id; struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; @@ -2275,7 +2275,7 @@ static int sbmac_init(struct platform_device *pldev, long long base) dev->name); goto free_mdio; } - dev_set_drvdata(&pldev->dev, sc->mii_bus); + platform_set_drvdata(pldev, sc->mii_bus); err = register_netdev(dev); if (err) { @@ -2300,7 +2300,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) return 0; unreg_mdio: mdiobus_unregister(sc->mii_bus); - dev_set_drvdata(&pldev->dev, NULL); free_mdio: mdiobus_free(sc->mii_bus); uninit_ctx: @@ -2624,7 +2623,7 @@ static int sbmac_probe(struct platform_device *pldev) goto out_unmap; } - dev_set_drvdata(&pldev->dev, dev); + platform_set_drvdata(pldev, dev); SET_NETDEV_DEV(dev, &pldev->dev); sc = netdev_priv(dev); @@ -2649,7 +2648,7 @@ out_out: static int __exit sbmac_remove(struct platform_device *pldev) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); unregister_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a13463e8a2c3..d964f302ac94 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -968,9 +968,6 @@ 
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_UNLOAD; break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; default: return; } @@ -1317,8 +1314,8 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) if (err) return err; - if (enable) + if (enable) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; @@ -1745,10 +1742,6 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -1770,9 +1763,6 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -2341,6 +2331,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp) tg3_phy_toggle_auxctl_smdsp(tp, false); } +static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +{ + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; +} + static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) { u32 val; @@ -2364,11 +2394,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) tw32(TG3_CPMU_EEE_CTRL, eeectl); - tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) tp->setlpicnt = 2; } @@ -4192,6 +4219,8 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + return 0; } @@ -4292,6 +4321,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) /* Advertise 1000-BaseT EEE ability */ if (advertise & ADVERTISED_1000baseT_Full) val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@ -4536,26 +4575,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - u32 val; - u32 tgtadv = 0; - u32 advertising = tp->link_config.advertising; + struct ethtool_eee eee; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true; - if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) - return false; - - val &= 
(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); - - - if (advertising & ADVERTISED_100baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_100TX; - if (advertising & ADVERTISED_1000baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_1000T; + tg3_eee_pull_config(tp, &eee); - if (val != tgtadv) - return false; + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + } return true; } @@ -4656,6 +4692,42 @@ static void tg3_clear_mac_status(struct tg3 *tp) udelay(40); } +static void tg3_setup_eee(struct tg3 *tp) +{ + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); +} + static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) { bool current_link_up; @@ -4822,8 +4894,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) */ if (!eee_config_ok && (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && - !force_reset) + !force_reset) { + tg3_setup_eee(tp); tg3_phy_reset(tp); + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@ -6335,9 +6409,7 @@ static void tg3_tx_recover(struct tg3 *tp) "Please report the problem to the driver maintainer " "and include system chipset information.\n"); - spin_lock(&tp->lock); tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); } static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) @@ -9205,11 +9277,9 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) } /* tp->lock is held. */ -static void tg3_rings_reset(struct tg3 *tp) +static void tg3_tx_rcbs_disable(struct tg3 *tp) { - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; + u32 txrcb, limit; /* Disable all transmit rings but the first. */ if (!tg3_flag(tp, 5705_PLUS)) @@ -9226,7 +9296,33 @@ static void tg3_rings_reset(struct tg3 *tp) txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +} +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit; /* Disable all receive return rings but the first. 
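/* The reworked tg3_phy_eee_config_ok() above snapshots the live EEE
 * state via tg3_eee_pull_config() and diffs it against the wanted
 * configuration. The predicate boils down to the following (struct
 * eee_cfg is a stand-in for the ethtool_eee fields the patch
 * compares):
 */
#include <stdbool.h>
#include <stdio.h>

struct eee_cfg {
	unsigned int advertised;
	unsigned int tx_lpi_timer;
	bool eee_enabled;
	bool tx_lpi_enabled;
};

static bool eee_config_ok(const struct eee_cfg *want,
			  const struct eee_cfg *hw)
{
	if (want->eee_enabled)
		return want->advertised == hw->advertised &&
		       want->tx_lpi_timer == hw->tx_lpi_timer &&
		       want->tx_lpi_enabled == hw->tx_lpi_enabled;

	/* EEE is disabled but we're (still) advertising */
	return hw->advertised == 0;
}

int main(void)
{
	struct eee_cfg want = { 0, 0, false, false };
	struct eee_cfg hw = { 0x8, 0, false, false };	/* stale advert */

	printf("config ok: %d\n", eee_config_ok(&want, &hw));	/* 0 */
	return 0;
}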
*/ if (tg3_flag(tp, 5717_PLUS)) @@ -9244,6 +9340,39 @@ static void tg3_rings_reset(struct tg3 *tp) rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + + tg3_rx_ret_rcbs_disable(tp); /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); @@ -9280,9 +9409,6 @@ static void tg3_rings_reset(struct tg3 *tp) tw32_tx_mbox(mbox + i * 8, 0); } - txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); @@ -9292,46 +9418,20 @@ static void tg3_rings_reset(struct tg3 *tp) tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff)); - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1; for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8; /* Clear status block in ram. 
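/* The tg3_rings_reset() rework above factors the per-vector ring
 * control blocks into init helpers that walk NIC SRAM at a fixed
 * stride, skipping vectors whose ring was never allocated (and
 * starting from index 1 when vector 0 is reserved, as with
 * ENABLE_TSS). Shape of that loop as a sketch -- the stride and base
 * values are placeholders, not the driver's constants:
 */
#include <stdio.h>

#define BDINFO_SIZE	16U	/* placeholder for TG3_BDINFO_SIZE */
#define SEND_RCB_BASE	0x100U	/* placeholder for NIC_SRAM_SEND_RCB */

struct ring {
	void *desc;	/* NULL = ring not allocated */
};

static void tx_rcbs_init(struct ring *rings, int irq_max, int skip_first)
{
	int i = skip_first ? 1 : 0;	/* mirrors the ENABLE_TSS bump */
	unsigned int rcb = SEND_RCB_BASE;

	for (; i < irq_max; i++, rcb += BDINFO_SIZE) {
		if (!rings[i].desc)
			continue;	/* leave this RCB disabled */
		printf("program ring %d at rcb %#x\n", i, rcb);
	}
}

int main(void)
{
	int x;
	struct ring rings[4] = { { &x }, { NULL }, { &x }, { &x } };

	tx_rcbs_init(rings, 4, 1);
	return 0;
}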
*/ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); } static void tg3_setup_rxbd_thresholds(struct tg3 *tp) @@ -9531,46 +9631,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1); - /* Enable MAC control of LPI */ - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | - TG3_CPMU_EEE_LNKIDL_UART_IDL; - if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) - val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; - - tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); - - tw32_f(TG3_CPMU_EEE_CTRL, - TG3_CPMU_EEE_CTRL_EXIT_20_1_US); - - val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | - TG3_CPMU_EEEMD_LPI_IN_TX | - TG3_CPMU_EEEMD_LPI_IN_RX | - TG3_CPMU_EEEMD_EEE_ENABLE; - - if (tg3_asic_rev(tp) != ASIC_REV_5717) - val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; - - if (tg3_flag(tp, ENABLE_APE)) - val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; - - tw32_f(TG3_CPMU_EEE_MODE, val); - - tw32_f(TG3_CPMU_EEE_DBTMR1, - TG3_CPMU_DBTMR1_PCIEXIT_2047US | - TG3_CPMU_DBTMR1_LNKIDLE_2047US); - - tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR2_APE_TX_2047US | - TG3_CPMU_DBTMR2_TXIDXEQ_2047US); - } - if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; } + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + if (reset_phy) tg3_phy_reset(tp); @@ -11226,7 +11297,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, */ err = tg3_alloc_consistent(tp); if (err) - goto err_out1; + goto out_ints_fini; tg3_napi_init(tp); @@ -11240,12 +11311,15 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } - goto err_out2; + goto out_napi_fini; } } tg3_full_lock(tp, 0); + if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + err = tg3_init_hw(tp, reset_phy); if (err) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -11255,7 +11329,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_full_unlock(tp); if (err) - goto err_out3; + goto out_free_irq; if (test_irq && tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); @@ -11266,7 +11340,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_free_rings(tp); tg3_full_unlock(tp); - goto err_out2; + goto out_napi_fini; } if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { @@ -11306,18 +11380,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, return 0; -err_out3: +out_free_irq: for (i = tp->irq_cnt - 1; i >= 0; i--) { struct tg3_napi *tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } -err_out2: +out_napi_fini: tg3_napi_disable(tp); tg3_napi_fini(tp); tg3_free_consistent(tp); -err_out1: +out_ints_fini: tg3_ints_fini(tp); return err; @@ -13362,11 +13436,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, struct tg3 *tp = netdev_priv(dev); bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - 
@@ -11226,7 +11297,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 	 */
 	err = tg3_alloc_consistent(tp);
 	if (err)
-		goto err_out1;
+		goto out_ints_fini;
 
 	tg3_napi_init(tp);
 
@@ -11240,12 +11311,15 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 				tnapi = &tp->napi[i];
 				free_irq(tnapi->irq_vec, tnapi);
 			}
-			goto err_out2;
+			goto out_napi_fini;
 		}
 	}
 
 	tg3_full_lock(tp, 0);
 
+	if (init)
+		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
 	err = tg3_init_hw(tp, reset_phy);
 	if (err) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -11255,7 +11329,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 	tg3_full_unlock(tp);
 
 	if (err)
-		goto err_out3;
+		goto out_free_irq;
 
 	if (test_irq && tg3_flag(tp, USING_MSI)) {
 		err = tg3_test_msi(tp);
@@ -11266,7 +11340,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 			tg3_free_rings(tp);
 			tg3_full_unlock(tp);
 
-			goto err_out2;
+			goto out_napi_fini;
 		}
 
 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
@@ -11306,18 +11380,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
 
 	return 0;
 
-err_out3:
+out_free_irq:
 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
 		struct tg3_napi *tnapi = &tp->napi[i];
 		free_irq(tnapi->irq_vec, tnapi);
 	}
 
-err_out2:
+out_napi_fini:
 	tg3_napi_disable(tp);
 	tg3_napi_fini(tp);
 	tg3_free_consistent(tp);
 
-err_out1:
+out_ints_fini:
 	tg3_ints_fini(tp);
 
 	return err;
@@ -13362,11 +13436,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 	struct tg3 *tp = netdev_priv(dev);
 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
 
-	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
-	    tg3_power_up(tp)) {
-		etest->flags |= ETH_TEST_FL_FAILED;
-		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
-		return;
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		if (tg3_power_up(tp)) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+			return;
+		}
+		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
 	}
 
 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -13657,6 +13733,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	return 0;
 }
 
+static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+		netdev_warn(tp->dev, "Board does not support EEE!\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (edata->advertised != tp->eee.advertised) {
+		netdev_warn(tp->dev,
+			    "Direct manipulation of EEE advertisement is not supported\n");
+		return -EINVAL;
+	}
+
+	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
+		netdev_warn(tp->dev,
+			    "Maximal Tx Lpi timer supported is %#x(u)\n",
+			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
+		return -EINVAL;
+	}
+
+	tp->eee = *edata;
+
+	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+	tg3_warn_mgmt_link_flap(tp);
+
+	if (netif_running(tp->dev)) {
+		tg3_full_lock(tp, 0);
+		tg3_setup_eee(tp);
+		tg3_phy_reset(tp);
+		tg3_full_unlock(tp);
+	}
+
+	return 0;
+}
+
+static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+		netdev_warn(tp->dev,
+			    "Board does not support EEE!\n");
+		return -EOPNOTSUPP;
+	}
+
+	*edata = tp->eee;
+	return 0;
+}
+
 static const struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
@@ -13690,6 +13817,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
 	.get_channels		= tg3_get_channels,
 	.set_channels		= tg3_set_channels,
 	.get_ts_info		= tg3_get_ts_info,
+	.get_eee		= tg3_get_eee,
+	.set_eee		= tg3_set_eee,
 };
 
 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
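With .get_eee/.set_eee wired into tg3_ethtool_ops, the ethtool core dispatches the ETHTOOL_GEEE and ETHTOOL_SEEE ioctls to these handlers; recent ethtool userspace exposes them as `ethtool --show-eee <dev>` and `ethtool --set-eee <dev> ...`. A minimal standalone probe of the read side is sketched below; the interface name "eth0" is an assumption.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&eee;

	/* On a non-EEE-capable tg3 board this fails with EOPNOTSUPP,
	 * matching the "Board does not support EEE!" path above.
	 */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("eee_enabled=%u tx_lpi_enabled=%u tx_lpi_timer=%u\n",
		       eee.eee_enabled, eee.tx_lpi_enabled,
		       eee.tx_lpi_timer);

	close(fd);
	return 0;
}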
@@ -15038,9 +15167,18 @@ static int tg3_phy_probe(struct tg3 *tp)
 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
-	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
+	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
+		tp->eee.supported = SUPPORTED_100baseT_Full |
+				    SUPPORTED_1000baseT_Full;
+		tp->eee.advertised = ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Full;
+		tp->eee.eee_enabled = 1;
+		tp->eee.tx_lpi_enabled = 1;
+		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
+	}
+
 	tg3_phy_init_link_config(tp);
 
 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
@@ -17112,7 +17250,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 {
 	struct net_device *dev;
 	struct tg3 *tp;
-	int i, err, pm_cap;
+	int i, err;
 	u32 sndmbx, rcvmbx, intmbx;
 	char str[40];
 	u64 dma_mask, persist_dma_mask;
@@ -17134,25 +17272,10 @@ static int tg3_init_one(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	/* Find power-management capability. */
-	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm_cap == 0) {
-		dev_err(&pdev->dev,
-			"Cannot find Power Management capability, aborting\n");
-		err = -EIO;
-		goto err_out_free_res;
-	}
-
-	err = pci_set_power_state(pdev, PCI_D0);
-	if (err) {
-		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
-		goto err_out_free_res;
-	}
-
 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
 	if (!dev) {
 		err = -ENOMEM;
-		goto err_out_power_down;
+		goto err_out_free_res;
 	}
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -17160,7 +17283,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 	tp = netdev_priv(dev);
 	tp->pdev = pdev;
 	tp->dev = dev;
-	tp->pm_cap = pm_cap;
+	tp->pm_cap = pdev->pm_cap;
 	tp->rx_mode = TG3_DEF_RX_MODE;
 	tp->tx_mode = TG3_DEF_TX_MODE;
 	tp->irq_sync = 1;
@@ -17498,9 +17621,6 @@ err_out_iounmap:
 err_out_free_dev:
 	free_netdev(dev);
 
-err_out_power_down:
-	pci_set_power_state(pdev, PCI_D3hot);
-
 err_out_free_res:
 	pci_release_regions(pdev);
 
@@ -17610,6 +17730,8 @@ static int tg3_resume(struct device *device)
 
 	tg3_full_lock(tp, 0);
 
+	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
 	tg3_flag_set(tp, INIT_COMPLETE);
 	err = tg3_restart_hw(tp,
 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
@@ -17671,10 +17793,13 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 	tg3_full_unlock(tp);
 
 done:
-	if (state == pci_channel_io_perm_failure)
+	if (state == pci_channel_io_perm_failure) {
+		tg3_napi_enable(tp);
+		dev_close(netdev);
 		err = PCI_ERS_RESULT_DISCONNECT;
-	else
+	} else {
 		pci_disable_device(pdev);
+	}
 
 	rtnl_unlock();
 
@@ -17720,6 +17845,10 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
+	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+		tg3_napi_enable(tp);
+		dev_close(netdev);
+	}
 	rtnl_unlock();
 
 	return rc;
@@ -17744,6 +17873,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 		goto done;
 
 	tg3_full_lock(tp, 0);
+	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
 	tg3_flag_set(tp, INIT_COMPLETE);
 	err = tg3_restart_hw(tp, true);
 	if (err) {
@@ -17781,15 +17911,4 @@ static struct pci_driver tg3_driver = {
 	.driver.pm	= &tg3_pm_ops,
 };
 
-static int __init tg3_init(void)
-{
-	return pci_register_driver(&tg3_driver);
-}
-
-static void __exit tg3_cleanup(void)
-{
-	pci_unregister_driver(&tg3_driver);
-}
-
-module_init(tg3_init);
-module_exit(tg3_cleanup);
+module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ff6e30eeae35..cd63d1189aae 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1175,6 +1175,7 @@
 #define TG3_CPMU_EEE_DBTMR1		0x000036b4
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
 #define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000007ff
+#define  TG3_CPMU_DBTMR1_LNKIDLE_MAX	 0x0000ffff
 #define TG3_CPMU_EEE_DBTMR2		0x000036b8
 #define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
 #define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000007ff
@@ -3372,6 +3373,7 @@ struct tg3 {
 	unsigned int			irq_cnt;
 
 	struct ethtool_coalesce		coal;
+	struct ethtool_eee		eee;
 
 	/* firmware info */
 	const char			*fw_needed;
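Two closing notes on the tg3.c cleanups above. The hand-rolled module init/exit pair gives way to module_pci_driver(); that macro (defined in include/linux/pci.h via module_driver()) expands to roughly the boilerplate the patch deletes, so behavior is unchanged:

/* Approximate expansion of module_pci_driver(tg3_driver). */
static int __init tg3_driver_init(void)
{
	return pci_register_driver(&tg3_driver);
}
module_init(tg3_driver_init);

static void __exit tg3_driver_exit(void)
{
	pci_unregister_driver(&tg3_driver);
}
module_exit(tg3_driver_exit);

Likewise, the tg3_init_one() hunks drop the manual PCI_CAP_ID_PM lookup and the explicit D0 transition rather than relocating them: the PCI core already caches the capability offset in pdev->pm_cap during enumeration and powers the device to D0 in pci_enable_device(), so the removed code was redundant.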