Diffstat (limited to 'drivers'): 53 files changed, 1025 insertions, 740 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5780dad6a3cb..ff652c77a0a5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1950,14 +1950,6 @@ config FEC Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. -config FEC2 - bool "Second FEC ethernet controller" - depends on FEC - help - Say Y here if you want to use the second built-in 10/100 Fast - ethernet controller on some Motorola ColdFire and Freescale - i.MX processors. - config FEC_MPC52xx tristate "MPC52xx FEC driver" depends on PPC_MPC52xx && PPC_BESTCOMM diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index 54c6d849cf25..62d6f88cbab5 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) } /** - * ks8695_get_settings - Get device-specific settings. + * ks8695_wan_get_settings - Get device-specific settings. * @ndev: The network device to read settings from * @cmd: The ethtool structure to read into */ static int -ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; @@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) SUPPORTED_TP | SUPPORTED_MII); cmd->transceiver = XCVR_INTERNAL; - /* Port specific extras */ - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - cmd->phy_address = 0; - /* not supported for HPNA */ - cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + cmd->port = PORT_MII; + cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); + cmd->phy_address = 0; - /* BUG: Erm, dtype hpna implies no phy regs */ - /* - ctrl = readl(KS8695_MISC_VA + KS8695_HMC); - cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF; - */ - return -EOPNOTSUPP; - case KS8695_DTYPE_WAN: - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); - cmd->phy_address = 0; + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + if ((ctrl & WMC_WAND) == 0) { + /* auto-negotiation is enabled */ + cmd->advertising |= ADVERTISED_Autoneg; + if (ctrl & WMC_WANA100F) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (ctrl & WMC_WANA100H) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (ctrl & WMC_WANA10F) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (ctrl & WMC_WANA10H) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (ctrl & WMC_WANAP) + cmd->advertising |= ADVERTISED_Pause; + cmd->autoneg = AUTONEG_ENABLE; + + cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; + cmd->duplex = (ctrl & WMC_WDS) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + } else { + /* auto-negotiation is disabled */ + cmd->autoneg = AUTONEG_DISABLE; - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - if ((ctrl & WMC_WAND) == 0) { - /* auto-negotiation is enabled */ - cmd->advertising |= ADVERTISED_Autoneg; - if (ctrl & WMC_WANA100F) - cmd->advertising |= ADVERTISED_100baseT_Full; - if (ctrl & WMC_WANA100H) - cmd->advertising |= ADVERTISED_100baseT_Half; - if (ctrl & WMC_WANA10F) - cmd->advertising |= ADVERTISED_10baseT_Full; - if (ctrl & WMC_WANA10H) - cmd->advertising |= ADVERTISED_10baseT_Half; - if (ctrl & WMC_WANAP) - cmd->advertising |= ADVERTISED_Pause; - cmd->autoneg = AUTONEG_ENABLE; - - cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & WMC_WDS) ? - DUPLEX_FULL : DUPLEX_HALF; - } else { - /* auto-negotiation is disabled */ - cmd->autoneg = AUTONEG_DISABLE; - - cmd->speed = (ctrl & WMC_WANF100) ? - SPEED_100 : SPEED_10; - cmd->duplex = (ctrl & WMC_WANFF) ? - DUPLEX_FULL : DUPLEX_HALF; - } - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; + cmd->speed = (ctrl & WMC_WANF100) ? + SPEED_100 : SPEED_10; + cmd->duplex = (ctrl & WMC_WANFF) ? + DUPLEX_FULL : DUPLEX_HALF; } return 0; } /** - * ks8695_set_settings - Set device-specific settings. + * ks8695_wan_set_settings - Set device-specific settings. * @ndev: The network device to configure * @cmd: The settings to configure */ static int -ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; @@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) ADVERTISED_100baseT_Full)) == 0) return -EINVAL; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* HPNA does not support auto-negotiation. 
*/ - return -EINVAL; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | - WMC_WANA10F | WMC_WANA10H); - if (cmd->advertising & ADVERTISED_100baseT_Full) - ctrl |= WMC_WANA100F; - if (cmd->advertising & ADVERTISED_100baseT_Half) - ctrl |= WMC_WANA100H; - if (cmd->advertising & ADVERTISED_10baseT_Full) - ctrl |= WMC_WANA10F; - if (cmd->advertising & ADVERTISED_10baseT_Half) - ctrl |= WMC_WANA10H; - - /* force a re-negotiation */ - ctrl |= WMC_WANR; - writel(ctrl, ksp->phyiface_regs + KS8695_WMC); - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | + WMC_WANA10F | WMC_WANA10H); + if (cmd->advertising & ADVERTISED_100baseT_Full) + ctrl |= WMC_WANA100F; + if (cmd->advertising & ADVERTISED_100baseT_Half) + ctrl |= WMC_WANA100H; + if (cmd->advertising & ADVERTISED_10baseT_Full) + ctrl |= WMC_WANA10F; + if (cmd->advertising & ADVERTISED_10baseT_Half) + ctrl |= WMC_WANA10H; + + /* force a re-negotiation */ + ctrl |= WMC_WANR; + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); } else { - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* BUG: dtype_hpna implies no phy registers */ - /* - ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); - - ctrl &= ~(HMC_HSS | HMC_HDS); - if (cmd->speed == SPEED_100) - ctrl |= HMC_HSS; - if (cmd->duplex == DUPLEX_FULL) - ctrl |= HMC_HDS; - - __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC); - */ - return -EOPNOTSUPP; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - /* disable auto-negotiation */ - ctrl |= WMC_WAND; - ctrl &= ~(WMC_WANF100 | WMC_WANFF); - - if (cmd->speed == SPEED_100) - ctrl |= WMC_WANF100; - if (cmd->duplex == DUPLEX_FULL) - ctrl |= WMC_WANFF; - - writel(ctrl, ksp->phyiface_regs + KS8695_WMC); - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + /* disable auto-negotiation */ + ctrl |= WMC_WAND; + ctrl &= ~(WMC_WANF100 | WMC_WANFF); + + if (cmd->speed == SPEED_100) + ctrl |= WMC_WANF100; + if (cmd->duplex == DUPLEX_FULL) + ctrl |= WMC_WANFF; + + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); } return 0; } /** - * ks8695_nwayreset - Restart the autonegotiation on the port. + * ks8695_wan_nwayreset - Restart the autonegotiation on the port. * @ndev: The network device to restart autoneotiation on */ static int -ks8695_nwayreset(struct net_device *ndev) +ks8695_wan_nwayreset(struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* No phy means no autonegotiation on hpna */ - return -EINVAL; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - if ((ctrl & WMC_WAND) == 0) - writel(ctrl | WMC_WANR, - ksp->phyiface_regs + KS8695_WMC); - else - /* auto-negotiation not enabled */ - return -EINVAL; - break; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } - - return 0; -} + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); -/** - * ks8695_get_link - Retrieve link status of network interface - * @ndev: The network interface to retrive the link status of. 
- */ -static u32 -ks8695_get_link(struct net_device *ndev) -{ - struct ks8695_priv *ksp = netdev_priv(ndev); - u32 ctrl; + if ((ctrl & WMC_WAND) == 0) + writel(ctrl | WMC_WANR, + ksp->phyiface_regs + KS8695_WMC); + else + /* auto-negotiation not enabled */ + return -EINVAL; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* HPNA always has link */ - return 1; - case KS8695_DTYPE_WAN: - /* WAN we can read the PHY for */ - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - return ctrl & WMC_WLS; - case KS8695_DTYPE_LAN: - return -EOPNOTSUPP; - } return 0; } /** - * ks8695_get_pause - Retrieve network pause/flow-control advertising + * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising * @ndev: The device to retrieve settings from * @param: The structure to fill out with the information */ static void -ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) +ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) { struct ks8695_priv *ksp = netdev_priv(ndev); u32 ctrl; - switch (ksp->dtype) { - case KS8695_DTYPE_HPNA: - /* No phy link on hpna to configure */ - return; - case KS8695_DTYPE_WAN: - ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - - /* advertise Pause */ - param->autoneg = (ctrl & WMC_WANAP); + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); - /* current Rx Flow-control */ - ctrl = ks8695_readreg(ksp, KS8695_DRXC); - param->rx_pause = (ctrl & DRXC_RFCE); + /* advertise Pause */ + param->autoneg = (ctrl & WMC_WANAP); - /* current Tx Flow-control */ - ctrl = ks8695_readreg(ksp, KS8695_DTXC); - param->tx_pause = (ctrl & DTXC_TFCE); - break; - case KS8695_DTYPE_LAN: - /* The LAN's "phy" is a direct-attached switch */ - return; - } -} + /* current Rx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + param->rx_pause = (ctrl & DRXC_RFCE); -/** - * ks8695_set_pause - Configure pause/flow-control - * @ndev: The device to configure - * @param: The pause parameters to set - * - * TODO: Implement this - */ -static int -ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param) -{ - return -EOPNOTSUPP; + /* current Tx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DTXC); + param->tx_pause = (ctrl & DTXC_TFCE); } /** @@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) static const struct ethtool_ops ks8695_ethtool_ops = { .get_msglevel = ks8695_get_msglevel, .set_msglevel = ks8695_set_msglevel, - .get_settings = ks8695_get_settings, - .set_settings = ks8695_set_settings, - .nway_reset = ks8695_nwayreset, - .get_link = ks8695_get_link, - .get_pauseparam = ks8695_get_pause, - .set_pauseparam = ks8695_set_pause, + .get_drvinfo = ks8695_get_drvinfo, +}; + +static const struct ethtool_ops ks8695_wan_ethtool_ops = { + .get_msglevel = ks8695_get_msglevel, + .set_msglevel = ks8695_set_msglevel, + .get_settings = ks8695_wan_get_settings, + .set_settings = ks8695_wan_set_settings, + .nway_reset = ks8695_wan_nwayreset, + .get_link = ethtool_op_get_link, + .get_pauseparam = ks8695_wan_get_pause, .get_drvinfo = ks8695_get_drvinfo, }; @@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev) /* driver system setup */ ndev->netdev_ops = &ks8695_netdev_ops; - SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); ndev->watchdog_timeo = msecs_to_jiffies(watchdog); netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); @@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev) if (ksp->phyiface_regs && ksp->link_irq == -1) { ks8695_init_switch(ksp); ksp->dtype = 
KS8695_DTYPE_LAN; + SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); } else if (ksp->phyiface_regs && ksp->link_irq != -1) { ks8695_init_wan_phy(ksp); ksp->dtype = KS8695_DTYPE_WAN; + SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); } else { /* No initialisation since HPNA does not have a PHY */ ksp->dtype = KS8695_DTYPE_HPNA; + SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); } /* And bring up the net_device with the net core */ diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index ce1e5e9d06f6..22abfb39d813 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -8,6 +8,11 @@ * Licensed under the GPL-2 or later. */ +#define DRV_VERSION "1.1" +#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> @@ -41,12 +46,7 @@ #include "bfin_mac.h" -#define DRV_NAME "bfin_mac" -#define DRV_VERSION "1.1" -#define DRV_AUTHOR "Bryan Wu, Luke Yang" -#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" - -MODULE_AUTHOR(DRV_AUTHOR); +MODULE_AUTHOR("Bryan Wu, Luke Yang"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRV_DESC); MODULE_ALIAS("platform:bfin_mac"); @@ -189,8 +189,7 @@ static int desc_list_init(void) /* allocate a new skb for next time receive */ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - printk(KERN_NOTICE DRV_NAME - ": init: low on mem - packet dropped\n"); + pr_notice("init: low on mem - packet dropped\n"); goto init_error; } skb_reserve(new_skb, NET_IP_ALIGN); @@ -240,7 +239,7 @@ static int desc_list_init(void) init_error: desc_list_free(); - printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); + pr_err("kmalloc failed\n"); return -ENOMEM; } @@ -259,8 +258,7 @@ static int bfin_mdio_poll(void) while ((bfin_read_EMAC_STAADD()) & STABUSY) { udelay(1); if (timeout_cnt-- < 0) { - printk(KERN_ERR DRV_NAME - ": wait MDC/MDIO transaction to complete timeout\n"); + pr_err("wait MDC/MDIO transaction to complete timeout\n"); return -ETIMEDOUT; } } @@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev) opmode &= ~RMII_10; break; default: - printk(KERN_WARNING - "%s: Ack! Speed (%d) is not 10/100!\n", - DRV_NAME, phydev->speed); + netdev_warn(dev, + "Ack! Speed (%d) is not 10/100!\n", + phydev->speed); break; } bfin_write_EMAC_OPMODE(opmode); @@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode) /* now we are supposed to have a proper phydev, to attach to... 
*/ if (!phydev) { - printk(KERN_INFO "%s: Don't found any phy device at all\n", - dev->name); + netdev_err(dev, "no phy device found\n"); return -ENODEV; } if (phy_mode != PHY_INTERFACE_MODE_RMII && phy_mode != PHY_INTERFACE_MODE_MII) { - printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); + netdev_err(dev, "invalid phy interface mode\n"); return -EINVAL; } @@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) 0, phy_mode); if (IS_ERR(phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + netdev_err(dev, "could not attach PHY\n"); return PTR_ERR(phydev); } @@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_duplex = -1; lp->phydev = phydev; - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" - "@sclk=%dMHz)\n", - DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, - MDC_CLK, mdc_div, sclk/1000000); + pr_info("attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq, + MDC_CLK, mdc_div, sclk/1000000); return 0; } @@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); + strcpy(info->driver, KBUILD_MODNAME); strcpy(info->version, DRV_VERSION); strcpy(info->fw_version, "N/A"); strcpy(info->bus_info, dev_name(&dev->dev)); @@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { }; /**************************************************************************/ -void setup_system_regs(struct net_device *dev) +static void setup_system_regs(struct net_device *dev) { struct bfin_mac_local *lp = netdev_priv(dev); int i; @@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev) bfin_write_EMAC_MMC_CTL(RSTC | CROLL); + /* Set vlan regs to let 1522 bytes long packets pass through */ + bfin_write_EMAC_VLAN1(lp->vlan1_mask); + bfin_write_EMAC_VLAN2(lp->vlan2_mask); + /* Initialize the TX DMA channel registers */ bfin_write_DMA2_X_COUNT(0); bfin_write_DMA2_X_MODIFY(4); @@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) udelay(1); if (timeout_cnt == 0) - printk(KERN_ERR DRV_NAME - ": fails to timestamp the TX packet\n"); + netdev_err(netdev, "timestamp the TX packet failed\n"); else { struct skb_shared_hwtstamps shhwtstamps; u64 ns; @@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev) * we which case we simply drop the packet */ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { - printk(KERN_NOTICE DRV_NAME - ": rx: receive error - packet dropped\n"); + netdev_notice(dev, "rx: receive error - packet dropped\n"); dev->stats.rx_dropped++; goto out; } @@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev) new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - printk(KERN_NOTICE DRV_NAME - ": rx: low on mem - packet dropped\n"); + netdev_notice(dev, "rx: low on mem - packet dropped\n"); dev->stats.rx_dropped++; goto out; } @@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev) int ret; u32 opmode; - pr_debug("%s: %s\n", DRV_NAME, __func__); + pr_debug("%s\n", __func__); /* Set RX DMA */ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); @@ -1287,19 +1284,12 @@ 
static void bfin_mac_multicast_hash(struct net_device *dev) { u32 emac_hashhi, emac_hashlo; struct netdev_hw_addr *ha; - char *addrs; u32 crc; emac_hashhi = emac_hashlo = 0; netdev_for_each_mc_addr(ha, dev) { - addrs = ha->addr; - - /* skip non-multicast addresses */ - if (!(*addrs & 1)) - continue; - - crc = ether_crc(ETH_ALEN, addrs); + crc = ether_crc(ETH_ALEN, ha->addr); crc >>= 26; if (crc & 0x20) @@ -1323,7 +1313,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev) u32 sysctl; if (dev->flags & IFF_PROMISC) { - printk(KERN_INFO "%s: set to promisc mode\n", dev->name); + netdev_info(dev, "set promisc mode\n"); sysctl = bfin_read_EMAC_OPMODE(); sysctl |= PR; bfin_write_EMAC_OPMODE(sysctl); @@ -1393,7 +1383,7 @@ static int bfin_mac_open(struct net_device *dev) * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ if (!is_valid_ether_addr(dev->dev_addr)) { - printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); + netdev_warn(dev, "no valid ethernet hw addr\n"); return -EINVAL; } @@ -1527,6 +1517,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) goto out_err_mii_probe; } + lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; + lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; + /* Fill in the fields of the device structure with ethernet values. */ ether_setup(ndev); @@ -1558,7 +1551,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) bfin_mac_hwtstamp_init(ndev); /* now, print out the card info, in a short format.. */ - dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); + netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); return 0; @@ -1650,7 +1643,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) * so set the GPIO pins to Ethernet mode */ pin_req = mii_bus_pd->mac_peripherals; - rc = peripheral_request_list(pin_req, DRV_NAME); + rc = peripheral_request_list(pin_req, KBUILD_MODNAME); if (rc) { dev_err(&pdev->dev, "Requesting peripherals failed!\n"); return rc; @@ -1739,7 +1732,7 @@ static struct platform_driver bfin_mac_driver = { .resume = bfin_mac_resume, .suspend = bfin_mac_suspend, .driver = { - .name = DRV_NAME, + .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, }; diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index aed68bed2365..f8559ac9a403 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h @@ -17,7 +17,14 @@ #include <linux/etherdevice.h> #include <linux/bfin_mac.h> +/* + * Disable hardware checksum for bug #5600 if writeback cache is + * enabled. Otherwize, corrupted RX packet will be sent up stack + * without error mark. 
+ */ +#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK #define BFIN_MAC_CSUM_OFFLOAD +#endif #define TX_RECLAIM_JIFFIES (HZ / 5) @@ -68,7 +75,6 @@ struct bfin_mac_local { */ struct net_device_stats stats; - unsigned char Mac[6]; /* MAC address of the board */ spinlock_t lock; int wol; /* Wake On Lan */ @@ -76,6 +82,9 @@ struct bfin_mac_local { struct timer_list tx_reclaim_timer; struct net_device *ndev; + /* Data for EMAC_VLAN1 regs */ + u16 vlan1_mask, vlan2_mask; + /* MII and PHY stuffs */ int old_link; /* used by bf537_adjust_link */ int old_speed; diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c index 99be5ae91991..142d6047da27 100644 --- a/drivers/net/bna/bnad_ethtool.c +++ b/drivers/net/bna/bnad_ethtool.c @@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); if (ioc_attr) { - memset(ioc_attr, 0, sizeof(*ioc_attr)); spin_lock_irqsave(&bnad->bna_lock, flags); bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); spin_unlock_irqrestore(&bnad->bna_lock, flags); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 7206ab2cbbf8..3437613f0454 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ int mac_off = 0; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) const unsigned char *addr; #endif @@ -3354,7 +3354,7 @@ use_random_mac_addr: if (found & VPD_FOUND_MAC) goto done; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) addr = of_get_property(cp->of_node, "local-mac-address", NULL); if (addr != NULL) { memcpy(dev_addr, addr, 6); @@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, cp->msg_enable = (cassini_debug < 0) ? 
CAS_DEF_MSG_ENABLE : cassini_debug; -#if defined(CONFIG_OF) +#if defined(CONFIG_SPARC) cp->of_node = pci_device_to_OF_node(pdev); #endif diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 3c403f895750..56166ae2059f 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev) netif_set_real_num_tx_queues(dev, pi->nqsets); err = netif_set_real_num_rx_queues(dev, pi->nqsets); if (err) - return err; - set_bit(pi->port_id, &adapter->open_device_map); + goto err_unwind; err = link_start(dev); if (err) - return err; + goto err_unwind; + netif_tx_start_all_queues(dev); + set_bit(pi->port_id, &adapter->open_device_map); return 0; + +err_unwind: + if (adapter->open_device_map == 0) + adapter_down(adapter); + return err; } /* @@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev) */ static int cxgb4vf_stop(struct net_device *dev) { - int ret; struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - ret = t4vf_enable_vi(adapter, pi->viid, false, false); + t4vf_enable_vi(adapter, pi->viid, false, false); pi->link_cfg.link_ok = 0; clear_bit(pi->port_id, &adapter->open_device_map); diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index e4bec78c8e3f..0f51c80475ce 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* * Write the command array into the Mailbox Data register array and * transfer ownership of the mailbox to the firmware. + * + * For the VFs, the Mailbox Data "registers" are actually backed by + * T4's "MA" interface rather than PL Registers (as is the case for + * the PFs). Because these are in different coherency domains, the + * write to the VF's PL-register-backed Mailbox Control can race in + * front of the writes to the MA-backed VF Mailbox Data "registers". + * So we need to do a read-back on at least one byte of the VF Mailbox + * Data registers before doing the write to the VF Mailbox Control + * register. 
*/ for (i = 0, p = cmd; i < size; i += 8) t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + t4_read_reg(adapter, mbox_data); /* flush write */ + t4_write_reg(adapter, mbox_ctl, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); t4_read_reg(adapter, mbox_ctl); /* flush write */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4ff88a683f61..e332aee386f6 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); - if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) + if (unlikely((!icr))) return IRQ_NONE; /* Not our interrupt */ + /* + * we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; /* guard against interrupt when we're going down */ diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index cb6c7b1c1fb8..7bdec0b0c562 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) * apply workaround for hardware errata documented in errata * docs Fixes issue where some error prone or unreliable PCIe * completions are occurring, particularly with ASPM enabled. - * Without fix, issue can cause tx timeouts. + * Without fix, issue can cause Tx timeouts. */ reg = er32(GCR2); reg |= 1; diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile index 360c91369f35..28519acacd2d 100644 --- a/drivers/net/e1000e/Makefile +++ b/drivers/net/e1000e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2008 Intel Corporation. +# Copyright(c) 1999 - 2011 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 7245dc2e0b7c..13149983d07e 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 5255be753746..e610e1369053 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index e45a61c8930a..2fefa820302b 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index f8ed03dab9b1..fa08b6336cfb 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index e774380c7cec..bc0860a598c9 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -102,7 +102,7 @@ enum e1e_registers { E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ + E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ /* Convenience macros * diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 5328a2927731..b43fc7fb1ee4 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index ff2872153b21..68aa1749bf66 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ - e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ - e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). + * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but we - * do not support receiving pause frames). + * do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. */ switch (hw->fc.current_mode) { @@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause - * frames but not send pause frames). + * frames but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames - * frames but we do not receive pause frames). + * frames but we do not receive pause frames). * 3: Both Rx and Tx flow control (symmetric) is enabled. * other: No other values should be possible at this point. */ @@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) } else { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + "Rx PAUSE frames only.\r\n"); } } /* diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fa5b60452547..1c18f26b0812 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -77,17 +77,17 @@ struct e1000_reg_info { char *name; }; -#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ static const struct e1000_reg_info e1000_reg_info_tbl[] = { @@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Interrupt Registers */ {E1000_ICR, "ICR"}, - /* RX Registers */ + /* Rx Registers */ {E1000_RCTL, "RCTL"}, {E1000_RDLEN, "RDLEN"}, {E1000_RDH, "RDH"}, @@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { {E1000_RDFTS, "RDFTS"}, {E1000_RDFPC, "RDFPC"}, - /* TX Registers */ + /* Tx Registers */ {E1000_TCTL, "TCTL"}, {E1000_TDBAL, "TDBAL"}, {E1000_TDBAH, "TDBAH"}, @@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) break; default: printk(KERN_INFO "%-15s %08x\n", - reginfo->name, __er32(hw, reginfo->ofs)); + reginfo->name, __er32(hw, reginfo->ofs)); return; } @@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) printk(KERN_CONT "\n"); } - /* - * e1000e_dump - Print registers, tx-ring and rx-ring + * e1000e_dump - Print registers, Tx-ring and Rx-ring */ static void e1000e_dump(struct e1000_adapter *adapter) { @@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter) struct e1000_reg_info *reginfo; struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; + struct my_u0 { + u64 a; + u64 b; + } *u0; struct e1000_buffer *buffer_info; struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_packet_split *rx_desc_ps; struct e1000_rx_desc *rx_desc; - struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; + struct my_u1 { + u64 a; + u64 b; + u64 c; + u64 d; + } *u1; u32 staterr; int i = 0; @@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); + "trans_start last_rx\n"); printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); + netdev->name, netdev->state, netdev->trans_start, + netdev->last_rx); } /* Print Registers */ @@ -214,26 +219,26 @@ 
static void e1000e_dump(struct e1000_adapter *adapter) e1000_regdump(hw, reginfo); } - /* Print TX Ring Summary */ + /* Print Tx Ring Summary */ if (!netdev || !netif_running(netdev)) goto exit; - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); + " leng ntw timestamp\n"); buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", - 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (unsigned long long)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp); + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); - /* Print TX Rings */ + /* Print Tx Ring */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) * @@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 */ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Legacy format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Context format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Data format\n"); + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " - "%04X %3X %016llX %p", - (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), i, (unsigned long long)le64_to_cpu(u0->a), (unsigned long long)le64_to_cpu(u0->b), (unsigned long long)buffer_info->dma, @@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); } - /* Print RX Rings Summary */ + /* Print Rx Ring Summary */ rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC]\n"); printk(KERN_INFO " %5d %5X %5X\n", 0, - rx_ring->next_to_use, rx_ring->next_to_clean); + rx_ring->next_to_use, rx_ring->next_to_clean); - /* Print RX Rings */ + /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) goto exit; - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); switch (adapter->rx_ps_pages) { case 1: case 2: @@ -329,7 +334,7 @@ rx_ring_summary: * +-----------------------------------------------------+ */ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " - "[buffer 1 63:0 ] " + "[buffer 1 63:0 ] " "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " "[bi->skb] <-- Ext Pkt Split format\n"); /* [Extended] Receive Descriptor (Write-Back) Format @@ -344,7 +349,7 @@ rx_ring_summary: * 63 48 47 32 31 20 19 0 */ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " - "[vl l0 ee es] " + "[vl l0 ee es] " "[ l3 l2 l1 hs] [reserved ] ---------------- " "[bi->skb] <-- Ext Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { @@ -352,26 +357,26 @@ rx_ring_summary: rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); u1 = (struct my_u1 *)rx_desc_ps; staterr = - le32_to_cpu(rx_desc_ps->wb.middle.status_error); + le32_to_cpu(rx_desc_ps->wb.middle.status_error); if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX %016llX %016llX " - "---------------- %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - buffer_info->skb); + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); } else { printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %016llX %016llX %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", @@ -400,18 +405,18 @@ rx_ring_summary: * 63 48 47 40 39 32 31 16 15 0 */ printk(KERN_INFO "Rl[desc] [address 63:0 ] " - "[vl er S cks ln] [bi->dma ] [bi->skb] " - "<-- Legacy format\n"); + "[vl er S cks ln] [bi->dma ] [bi->skb] " + "<-- Legacy format\n"); for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; u0 = 
(struct my_u0 *)rx_desc; printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " - "%016llX %p", i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + "%016llX %p", i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) @@ -421,9 +426,10 @@ rx_ring_summary: if (netif_msg_pktdata(adapter)) print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - adapter->rx_buffer_len, true); + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_buffer_len, true); } } @@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void e1000_receive_skb(struct e1000_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, + struct net_device *netdev, struct sk_buff *skb, u8 status, __le16 vlan) { skb->protocol = eth_type_trans(skb, netdev); @@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, } /** - * e1000_rx_checksum - Receive Checksum Offload for 82543 + * e1000_rx_checksum - Receive Checksum Offload * @adapter: board private structure * @status_err: receive descriptor status and error fields * @csum: receive descriptor csum field @@ -548,7 +553,7 @@ map_skb: adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "RX DMA map failed\n"); + dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; break; } @@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ps_page = &buffer_info->ps_pages[j]; if (j >= adapter->rx_ps_pages) { /* all unused desc entries get hw null ptr */ - rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); continue; } if (!ps_page->page) { @@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, if (dma_mapping_error(&pdev->dev, ps_page->dma)) { dev_err(&adapter->pdev->dev, - "RX DMA page map failed\n"); + "Rx DMA page map failed\n"); adapter->rx_dma_failed++; goto no_buffers; } @@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, * didn't change because each write-back * erases this info. */ - rx_desc->read.buffer_addr[j+1] = - cpu_to_le64(ps_page->dma); + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); } skb = netdev_alloc_skb_ip_align(netdev, @@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "RX DMA map failed\n"); + dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; /* cleanup skb */ dev_kfree_skb_any(skb); @@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, * such as IA-64). 
*/ wmb(); - writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); + writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); } i++; @@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, cleaned = 1; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_bsize0, - DMA_FROM_DEVICE); + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); buffer_info->dma = 0; - /* see !EOP comment in other rx routine */ + /* see !EOP comment in other Rx routine */ if (!(staterr & E1000_RXD_STAT_EOP)) adapter->flags2 |= FLAG2_IS_DISCARDING; @@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) } /** - * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * e1000_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. @@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if - * there are tx hangs or other tx related bugs + * there are Tx hangs or other Tx related bugs */ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; ew32(TXDCTL(0), txdctl); @@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->rx_ps_pages) { /* this is a 32 byte descriptor */ rdlen = rx_ring->count * - sizeof(union e1000_rx_desc_packet_split); + sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { @@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* * set the writeback threshold (only takes effect if the RDTR * is set). set GRAN=1 and write back up to 0x4 worth, and - * enable prefetching of 0x20 rx descriptors + * enable prefetching of 0x20 Rx descriptors * granularity = 01 * wthresh = 04, * hthresh = 04, @@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * excessive C-state transition latencies result in * dropped transactions. */ - pm_qos_update_request( - &adapter->netdev->pm_qos_req, 55); + pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); } else { - pm_qos_update_request( - &adapter->netdev->pm_qos_req, - PM_QOS_DEFAULT_VALUE); + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); } } @@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; /* - * the Tx fifo also stores 16 bytes of information about the tx + * the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + @@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter) pba -= min_tx_space - tx_space; /* - * if short on Rx space, Rx wins and must trump tx + * if short on Rx space, Rx wins and must trump Tx * adjustment or use Early Receive if available */ if ((pba < min_rx_space) && @@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) adapter->netdev->name, adapter->link_speed, (adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex", + "Full Duplex" : "Half Duplex", ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? - "RX/TX" : - ((ctrl & E1000_CTRL_RFCE) ? 
"RX" : - ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); } static bool e1000e_has_link(struct e1000_adapter *adapter) @@ -4338,7 +4341,7 @@ link_up: /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; - /* flush partial descriptors to memory before detecting tx hang */ + /* flush partial descriptors to memory before detecting Tx hang */ if (adapter->flags2 & FLAG2_DMA_BURST) { ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); @@ -4529,7 +4532,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info->next_to_watch = i; buffer_info->dma = dma_map_single(&pdev->dev, skb->data + offset, - size, DMA_TO_DEVICE); + size, DMA_TO_DEVICE); buffer_info->mapped_as_page = false; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; @@ -4576,7 +4579,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } } - segs = skb_shinfo(skb)->gso_segs ?: 1; + segs = skb_shinfo(skb)->gso_segs ? : 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; @@ -4588,13 +4591,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, return count; dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); + dev_err(&pdev->dev, "Tx DMA map failed\n"); buffer_info->dma = 0; if (count) count--; while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -6193,7 +6196,7 @@ static int __init e1000_init_module(void) int ret; pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a9612b0e4bca..4dd9b63273f6 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak, module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); - /* * Transmit Interrupt Delay in units of 1.024 microseconds - * Tx interrupt delay needs to typically be set to something non zero + * Tx interrupt delay needs to typically be set to something non-zero * * Valid Range: 0-65535 */ @@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define DEFAULT_ITR 3 #define MAX_ITR 100000 #define MIN_ITR 100 + /* IntMode (Interrupt Mode) * * Valid Range: 0 - 2 diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index a640f1c369ae..326788eab2f7 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) s32 ret_val; u16 phy_data; - /* Enable CRS on TX. This must be set for half-duplex operation. */ + /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) goto out; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index a724a2d14506..6c7257bd73fc 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0106" +#define DRV_VERSION "EHEA_0107" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 1032b5bbe238..f75d3144b8a5 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) } } /* Ring doorbell */ - ehea_update_rq1a(pr->qp, i); + ehea_update_rq1a(pr->qp, i - 1); } static int ehea_refill_rq_def(struct ehea_port_res *pr, @@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) int ret; struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; - ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 - - init_attr->act_nr_rwqes_rq2 - - init_attr->act_nr_rwqes_rq3 - 1); + ehea_init_fill_rq1(pr, pr->rq1_skba.len); ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 45c4b7bfcf39..f1d4b450e797 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -433,7 +433,6 @@ static void gfar_init_mac(struct net_device *ndev) static struct net_device_stats *gfar_get_stats(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct netdev_queue *txq; unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; unsigned long tx_packets = 0, tx_bytes = 0; int i = 0; @@ -449,9 +448,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) dev->stats.rx_dropped = rx_dropped; for (i = 0; i < priv->num_tx_queues; i++) { - txq = netdev_get_tx_queue(dev, i); - tx_bytes += txq->tx_bytes; - tx_packets += txq->tx_packets; + tx_bytes += priv->tx_queue[i]->stats.tx_bytes; + tx_packets += priv->tx_queue[i]->stats.tx_packets; } dev->stats.tx_bytes = tx_bytes; @@ -2108,8 +2106,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Update transmit stats */ - txq->tx_bytes += skb->len; - txq->tx_packets ++; + tx_queue->stats.tx_bytes += skb->len; + tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; lstatus = txbdp->lstatus; diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 68984eb88ae0..54de4135e932 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -907,12 +907,21 @@ enum { MQ_MG_MODE }; +/* + * Per TX queue stats + */ +struct tx_q_stats { + unsigned long tx_packets; + unsigned long tx_bytes; +}; + /** * struct gfar_priv_tx_q - per tx queue structure * @txlock: per queue tx spin lock * @tx_skbuff:skb pointers * @skb_curtx: to be used skb pointer * @skb_dirtytx:the last used skb pointer + * @stats: bytes/packets stats * @qindex: index of this queue * @dev: back pointer to the dev structure * @grp: back pointer to the group to which this queue belongs @@ -934,6 +943,7 @@ struct gfar_priv_tx_q { struct txbd8 *tx_bd_base; struct txbd8 
*cur_tx; struct txbd8 *dirty_tx; + struct tx_q_stats stats; struct net_device *dev; struct gfar_priv_grp *grp; u16 skb_curtx; diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 27d6960ce09e..fdb0333f5cb6 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -1,7 +1,7 @@ /* * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. * - * 2005-2009 (c) Aeroflex Gaisler AB + * 2005-2010 (c) Aeroflex Gaisler AB * * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs * available in the GRLIB VHDL IP core library. @@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev) dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); + GRETH_REGSAVE(greth->regs->status, 0xFF); + napi_enable(&greth->napi); greth_enable_irqs(greth); @@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev) napi_disable(&greth->napi); + greth_disable_irqs(greth); greth_disable_tx(greth); + greth_disable_rx(greth); netif_stop_queue(dev); @@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; int err = NETDEV_TX_OK; - u32 status, dma_addr; + u32 status, dma_addr, ctrl; + unsigned long flags; - bdp = greth->tx_bd_base + greth->tx_next; + /* Clean TX Ring */ + greth_clean_tx(greth->netdev); if (unlikely(greth->tx_free <= 0)) { + spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Enable TX IRQ only if not already in poll() routine */ + if (ctrl & GRETH_RXI) + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); netif_stop_queue(dev); + spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_BUSY; } @@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } + bdp = greth->tx_bd_base + greth->tx_next; dma_addr = greth_read_bd(&bdp->addr); memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); - status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); + status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); /* Wrap around descriptor ring */ if (greth->tx_next == GRETH_TXBD_NUM_MASK) { @@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) greth->tx_next = NEXT_TX(greth->tx_next); greth->tx_free--; - /* No more descriptors */ - if (unlikely(greth->tx_free == 0)) { - - /* Free transmitted descriptors */ - greth_clean_tx(dev); - - /* If nothing was cleaned, stop queue & wait for irq */ - if (unlikely(greth->tx_free == 0)) { - status |= GRETH_BD_IE; - netif_stop_queue(dev); - } - } - /* Write descriptor control word and enable transmission */ greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ greth_enable_tx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); out: dev_kfree_skb(skb); @@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; - u32 status = 0, dma_addr; + u32 status = 0, dma_addr, ctrl; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; + unsigned long flags; nr_frags = skb_shinfo(skb)->nr_frags; + /* Clean TX Ring */ + greth_clean_tx_gbit(dev); + if (greth->tx_free < nr_frags + 1) { + spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Enable TX IRQ only if not already in poll() routine */ + if (ctrl & GRETH_RXI) + 
GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); netif_stop_queue(dev); + spin_unlock_irqrestore(&greth->devlock, flags); err = NETDEV_TX_BUSY; goto out; } @@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) greth->tx_skbuff[curr_tx] = NULL; bdp = greth->tx_bd_base + curr_tx; - status = GRETH_TXBD_CSALL; + status = GRETH_TXBD_CSALL | GRETH_BD_EN; status |= frag->size & GRETH_BD_LEN; /* Wrap around descriptor ring */ @@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* More fragments left */ if (i < nr_frags - 1) status |= GRETH_TXBD_MORE; - - /* ... last fragment, check if out of descriptors */ - else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) { - - /* Enable interrupts and stop queue */ - status |= GRETH_BD_IE; - netif_stop_queue(dev); - } + else + status |= GRETH_BD_IE; /* enable IRQ on last fragment */ greth_write_bd(&bdp->stat, status); @@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) wmb(); - /* Enable the descriptors that we configured ... */ - for (i = 0; i < nr_frags + 1; i++) { - bdp = greth->tx_bd_base + greth->tx_next; - greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); - greth->tx_next = NEXT_TX(greth->tx_next); - greth->tx_free--; - } + /* Enable the descriptor chain by enabling the first descriptor */ + bdp = greth->tx_bd_base + greth->tx_next; + greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); + greth->tx_next = curr_tx; + greth->tx_free -= nr_frags + 1; + wmb(); + + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ greth_enable_tx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_OK; frag_map_error: - /* Unmap SKB mappings that succeeded */ + /* Unmap SKB mappings that succeeded and disable descriptor */ for (i = 0; greth->tx_next + i != curr_tx; i++) { bdp = greth->tx_bd_base + greth->tx_next + i; dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), greth_read_bd(&bdp->stat) & GRETH_BD_LEN, DMA_TO_DEVICE); + greth_write_bd(&bdp->stat, 0); } map_error: if (net_ratelimit()) @@ -565,12 +574,11 @@ out: return err; } - static irqreturn_t greth_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct greth_private *greth; - u32 status; + u32 status, ctrl; irqreturn_t retval = IRQ_NONE; greth = netdev_priv(dev); @@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) /* Get the interrupt events that caused us to be here. */ status = GRETH_REGLOAD(greth->regs->status); - /* Handle rx and tx interrupts through poll */ - if (status & (GRETH_INT_RX | GRETH_INT_TX)) { - - /* Clear interrupt status */ - GRETH_REGORIN(greth->regs->status, - status & (GRETH_INT_RX | GRETH_INT_TX)); + /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be + * set regardless of whether IRQ is enabled or not. Especially + * important when shared IRQ. 
+ */ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Handle rx and tx interrupts through poll */ + if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) || + ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) { retval = IRQ_HANDLED; /* Disable interrupts and schedule poll() */ @@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev) while (1) { bdp = greth->tx_bd_base + greth->tx_last; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); stat = greth_read_bd(&bdp->stat); if (unlikely(stat & GRETH_BD_EN)) @@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev) /* We only clean fully completed SKBs */ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); - stat = bdp_last_frag->stat; + + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); + stat = greth_read_bd(&bdp_last_frag->stat); if (stat & GRETH_BD_EN) break; @@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev) greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } - if (greth->tx_free > (MAX_SKB_FRAGS + 1)) { - netif_wake_queue(dev); - } -} -static int greth_pending_packets(struct greth_private *greth) -{ - struct greth_bd *bdp; - u32 status; - bdp = greth->rx_bd_base + greth->rx_cur; - status = greth_read_bd(&bdp->stat); - if (status & GRETH_BD_EN) - return 0; - else - return 1; + if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) + netif_wake_queue(dev); } static int greth_rx(struct net_device *dev, int limit) @@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit) int pkt_len; int bad, count; u32 status, dma_addr; + unsigned long flags; greth = netdev_priv(dev); for (count = 0; count < limit; ++count) { bdp = greth->rx_bd_base + greth->rx_cur; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); status = greth_read_bd(&bdp->stat); - dma_addr = greth_read_bd(&bdp->addr); - bad = 0; if (unlikely(status & GRETH_BD_EN)) { break; } + dma_addr = greth_read_bd(&bdp->addr); + bad = 0; + /* Check status for errors. 
*/ if (unlikely(status & GRETH_RXBD_STATUS)) { if (status & GRETH_RXBD_ERR_FT) { @@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit) dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); + spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */ greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); greth->rx_cur = NEXT_RX(greth->rx_cur); } @@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) int pkt_len; int bad, count = 0; u32 status, dma_addr; + unsigned long flags; greth = netdev_priv(dev); @@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit) bdp = greth->rx_bd_base + greth->rx_cur; skb = greth->rx_skbuff[greth->rx_cur]; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); status = greth_read_bd(&bdp->stat); bad = 0; @@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) } } - /* Allocate new skb to replace current */ - newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); - - if (!bad && newskb) { + /* Allocate new skb to replace current, not needed if the + * current skb can be reused */ + if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) { skb_reserve(newskb, NET_IP_ALIGN); dma_addr = dma_map_single(greth->dev, @@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit) if (net_ratelimit()) dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); dev_kfree_skb(newskb); + /* reusing current skb, so it is a drop */ dev->stats.rx_dropped++; } + } else if (bad) { + /* Bad Frame transfer, the skb is reused */ + dev->stats.rx_dropped++; } else { + /* Failed Allocating a new skb. This is rather stupid + * but the current "filled" skb is reused, as if + * transfer failure. 
One could argue that RX descriptor + * table handling should be divided into cleaning and + * filling as the TX part of the driver + */ if (net_ratelimit()) dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); + /* reusing current skb, so it is a drop */ dev->stats.rx_dropped++; } @@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) wmb(); greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); greth->rx_cur = NEXT_RX(greth->rx_cur); } @@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget) { struct greth_private *greth; int work_done = 0; + unsigned long flags; + u32 mask, ctrl; greth = container_of(napi, struct greth_private, napi); - if (greth->gbit_mac) { - greth_clean_tx_gbit(greth->netdev); - } else { - greth_clean_tx(greth->netdev); +restart_txrx_poll: + if (netif_queue_stopped(greth->netdev)) { + if (greth->gbit_mac) + greth_clean_tx_gbit(greth->netdev); + else + greth_clean_tx(greth->netdev); } -restart_poll: if (greth->gbit_mac) { work_done += greth_rx_gbit(greth->netdev, budget - work_done); } else { @@ -949,15 +976,29 @@ restart_poll: if (work_done < budget) { - napi_complete(napi); + spin_lock_irqsave(&greth->devlock, flags); + + ctrl = GRETH_REGLOAD(greth->regs->control); + if (netif_queue_stopped(greth->netdev)) { + GRETH_REGSAVE(greth->regs->control, + ctrl | GRETH_TXI | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE | + GRETH_INT_TX | GRETH_INT_TE; + } else { + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE; + } - if (greth_pending_packets(greth)) { - napi_reschedule(napi); - goto restart_poll; + if (GRETH_REGLOAD(greth->regs->status) & mask) { + GRETH_REGSAVE(greth->regs->control, ctrl); + spin_unlock_irqrestore(&greth->devlock, flags); + goto restart_txrx_poll; + } else { + __napi_complete(napi); + spin_unlock_irqrestore(&greth->devlock, flags); } } - greth_enable_irqs(greth); return work_done; } @@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = { }; static struct net_device_ops greth_netdev_ops = { - .ndo_open = greth_open, - .ndo_stop = greth_close, - .ndo_start_xmit = greth_start_xmit, - .ndo_set_mac_address = greth_set_mac_add, - .ndo_validate_addr = eth_validate_addr, + .ndo_open = greth_open, + .ndo_stop = greth_close, + .ndo_start_xmit = greth_start_xmit, + .ndo_set_mac_address = greth_set_mac_add, + .ndo_validate_addr = eth_validate_addr, }; static inline int wait_for_mdio(struct greth_private *greth) @@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev) struct greth_private *greth = netdev_priv(dev); struct phy_device *phydev = greth->phy; unsigned long flags; - int status_change = 0; + u32 ctrl; spin_lock_irqsave(&greth->devlock, flags); if (phydev->link) { if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { - - GRETH_REGANDIN(greth->regs->control, - ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB)); + ctrl = GRETH_REGLOAD(greth->regs->control) & + ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB); if (phydev->duplex) - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); - - if (phydev->speed == SPEED_100) { - - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP); - } + ctrl |= GRETH_CTRL_FD; + if (phydev->speed == SPEED_100) + ctrl |= GRETH_CTRL_SP; else if (phydev->speed == SPEED_1000) - GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); + ctrl |= GRETH_CTRL_GB; + 
GRETH_REGSAVE(greth->regs->control, ctrl); greth->speed = phydev->speed; greth->duplex = phydev->duplex; status_change = 1; @@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = { { .name = "GAISLER_ETHMAC", }, + { + .name = "01_01d", + }, {}, }; diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 03ad903cd676..be0f2062bd14 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h @@ -23,6 +23,7 @@ #define GRETH_BD_LEN 0x7FF #define GRETH_TXEN 0x1 +#define GRETH_INT_TE 0x2 #define GRETH_INT_TX 0x8 #define GRETH_TXI 0x4 #define GRETH_TXBD_STATUS 0x0001C000 @@ -35,6 +36,7 @@ #define GRETH_TXBD_ERR_UE 0x4000 #define GRETH_TXBD_ERR_AL 0x8000 +#define GRETH_INT_RE 0x1 #define GRETH_INT_RX 0x4 #define GRETH_RXEN 0x2 #define GRETH_RXI 0x8 diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a060610a42db..602078b84892 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { - struct net_device *netdev = tx_ring->netdev; - struct netdev_queue *txq; unsigned int first; unsigned int tx_flags = 0; u8 hdr_len = 0; @@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) ixgbe_atr(tx_ring, skb, tx_flags, protocol); - txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); - txq->tx_bytes += skb->len; - txq->tx_packets++; ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); @@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; - /* accurate rx/tx bytes/packets stats */ - dev_txq_stats_fold(netdev, stats); rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); @@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, stats->rx_bytes += bytes; } } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } rcu_read_unlock(); /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 21845affea13..5933621ac3ff 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -585,7 +585,7 @@ err: rcu_read_lock_bh(); vlan = rcu_dereference(q->vlan); if (vlan) - netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; + vlan->dev->stats.tx_dropped++; rcu_read_unlock_bh(); return err; diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 6d6806b361e3..897f576b8b17 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, int i; int err; - dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), + prof->tx_ring_num, prof->rx_ring_num); if (dev == NULL) { mlx4_err(mdev, "Net device 
allocation failed\n"); return -ENOMEM; diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2c158910f7ea..e953793a33ff 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 78d70a6481bf..a1b82c9c67d2 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -32,6 +32,7 @@ #include <linux/init.h> #include <linux/jiffies.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <asm/uaccess.h> #include <asm/string.h> @@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap) data = ap->tpkt->data; count = ap->tpkt->len; fcs = ap->tfcs; - proto = (data[0] << 8) + data[1]; + proto = get_unaligned_be16(data); /* * LCP packets with code values between 1 (configure-reqest) @@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, code = data[0]; if (code != CONFACK && code != CONFREQ) return; - dlen = (data[2] << 8) + data[3]; + dlen = get_unaligned_be16(data + 2); if (len < dlen) return; /* packet got truncated or length is bogus */ @@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { switch (data[0]) { case LCP_MRU: - val = (data[2] << 8) + data[3]; + val = get_unaligned_be16(data + 2); if (inbound) ap->mru = val; else ap->chan.mtu = val; break; case LCP_ASYNCMAP: - val = (data[2] << 24) + (data[3] << 16) - + (data[4] << 8) + data[5]; + val = get_unaligned_be32(data + 2); if (inbound) ap->raccm = val; else diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 695bc83e0cfd..43583309a65d 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c @@ -41,6 +41,7 @@ #include <linux/ppp-comp.h> #include <linux/zlib.h> +#include <asm/unaligned.h> /* * State for a Deflate (de)compressor. @@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, */ wptr[0] = PPP_ADDRESS(rptr); wptr[1] = PPP_CONTROL(rptr); - wptr[2] = PPP_COMP >> 8; - wptr[3] = PPP_COMP; + put_unaligned_be16(PPP_COMP, wptr + 2); wptr += PPP_HDRLEN; - wptr[0] = state->seqno >> 8; - wptr[1] = state->seqno; + put_unaligned_be16(state->seqno, wptr); wptr += DEFLATE_OVHD; olen = PPP_HDRLEN + DEFLATE_OVHD; state->strm.next_out = wptr; @@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize, } /* Check the sequence number. 
*/ - seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; + seq = get_unaligned_be16(ibuf + PPP_HDRLEN); if (seq != (state->seqno & 0xffff)) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6456484c0299..c7a6c4466978 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -46,6 +46,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <net/slhc_vj.h> #include <asm/atomic.h> @@ -210,7 +211,7 @@ struct ppp_net { }; /* Get the PPP protocol number from a skb */ -#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) +#define PPP_PROTO(skb) get_unaligned_be16((skb)->data) /* We limit the length of ppp->file.rq to this (arbitrary) value */ #define PPP_MAX_RQLEN 32 @@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) pp = skb_push(skb, 2); proto = npindex_to_proto[npi]; - pp[0] = proto >> 8; - pp[1] = proto; + put_unaligned_be16(proto, pp); netif_stop_queue(dev); skb_queue_tail(&ppp->file.xq, skb); @@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) q = skb_put(frag, flen + hdrlen); /* make the MP header */ - q[0] = PPP_MP >> 8; - q[1] = PPP_MP; + put_unaligned_be16(PPP_MP, q); if (ppp->flags & SC_MP_XSHORTSEQ) { q[2] = bits + ((ppp->nxseq >> 8) & 0xf); q[3] = ppp->nxseq; diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 6d1a1b80cc3e..9a1849a83e2a 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -55,6 +55,7 @@ #include <linux/ppp_defs.h> #include <linux/ppp-comp.h> #include <linux/scatterlist.h> +#include <asm/unaligned.h> #include "ppp_mppe.h" @@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, */ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); - obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ - obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ + put_unaligned_be16(PPP_COMP, obuf + 2); obuf += PPP_HDRLEN; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (state->debug >= 7) printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, state->ccount); - obuf[0] = state->ccount >> 8; - obuf[1] = state->ccount & 0xff; + put_unaligned_be16(state->ccount, obuf); if (!state->stateful || /* stateless mode */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 4c95ec3fb8d4..4e6b72f57de8 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -45,6 +45,7 @@ #include <linux/completion.h> #include <linux/init.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <asm/uaccess.h> #define PPP_VERSION "2.4.2" @@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) int islcp; data = skb->data; - proto = (data[0] << 8) + data[1]; + proto = get_unaligned_be16(data); /* LCP packets with codes between 1 (configure-request) * and 7 (code-reject) must be sent as though no options diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 9c2a02d204dc..44e316fd67b8 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -34,8 +34,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 14 -#define QLCNIC_LINUX_VERSIONID "5.0.14" +#define _QLCNIC_LINUX_SUBVERSION 15 +#define QLCNIC_LINUX_VERSIONID "5.0.15" #define QLCNIC_DRV_IDC_VER 0x01 #define 
QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -289,6 +289,26 @@ struct uni_data_desc{ u32 reserved[5]; }; +/* Flash Defines and Structures */ +#define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FW_IMAGE_REGION 0x74 +struct qlcnic_flt_header { + u16 version; + u16 len; + u16 checksum; + u16 reserved; +}; + +struct qlcnic_flt_entry { + u8 region; + u8 reserved0; + u8 attrib; + u8 reserved1; + u32 size; + u32 start_addr; + u32 end_add; +}; + /* Magic number to let user know flash is programmed */ #define QLCNIC_BDINFO_MAGIC 0x12345678 diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 1e7af709d395..4c14510e2a87 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c @@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, if (data[1]) eth_test->flags |= ETH_TEST_FL_FAILED; - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { data[2] = qlcnic_irq_test(dev); if (data[2]) eth_test->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 9b9c7c39d3ee..a7f1d5b7e811 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { return 0; } +static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, + struct qlcnic_flt_entry *region_entry) +{ + struct qlcnic_flt_header flt_hdr; + struct qlcnic_flt_entry *flt_entry; + int i = 0, ret; + u32 entry_size; + + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, + (u8 *)&flt_hdr, + sizeof(struct qlcnic_flt_header)); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout header\n"); + return -EIO; + } + + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); + flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); + if (flt_entry == NULL) { + dev_warn(&adapter->pdev->dev, "error allocating memory\n"); + return -EIO; + } + + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + + sizeof(struct qlcnic_flt_header), + (u8 *)flt_entry, entry_size); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout entries\n"); + goto err_out; + } + + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { + if (flt_entry[i].region == region) + break; + i++; + } + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { + dev_warn(&adapter->pdev->dev, + "region=%x not found in %d regions\n", region, i); + ret = -EIO; + goto err_out; + } + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); + +err_out: + vfree(flt_entry); + return ret; +} + int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) { + struct qlcnic_flt_entry fw_entry; u32 ver = -1, min_ver; + int ret; - qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); + ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); + if (!ret) + /* 0-4:-signature, 4-8:-fw version */ + qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, + (int *)&ver); + else + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, + (int *)&ver); ver = QLCNIC_DECODE_VERSION(ver); min_ver = QLCNIC_MIN_FW_VERSION; diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 11e3a46c0911..37c04b4fade3 100644 --- 
a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " static struct workqueue_struct *qlcnic_wq; static int qlcnic_mac_learn; -module_param(qlcnic_mac_learn, int, 0644); +module_param(qlcnic_mac_learn, int, 0444); MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); static int use_msi = 1; -module_param(use_msi, int, 0644); +module_param(use_msi, int, 0444); MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); static int use_msi_x = 1; -module_param(use_msi_x, int, 0644); +module_param(use_msi_x, int, 0444); MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); static int auto_fw_reset = AUTO_FW_RESET_ENABLED; @@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644); MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); static int load_fw_file; -module_param(load_fw_file, int, 0644); +module_param(load_fw_file, int, 0444); MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); static int qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0644); +module_param(qlcnic_config_npars, int, 0444); MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); static int __devinit qlcnic_probe(struct pci_dev *pdev, diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index dd758cdb55c4..bde7d61f1930 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -554,6 +554,8 @@ struct rtl8169_private { struct mii_if_info mii; struct rtl8169_counters counters; u32 saved_wolopts; + + const struct firmware *fw; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); @@ -1632,42 +1634,163 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) { __le32 *phytable = (__le32 *)fw->data; struct net_device *dev = tp->dev; - size_t i; + size_t index, fw_size = fw->size / sizeof(*phytable); + u32 predata, count; if (fw->size % sizeof(*phytable)) { netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); return; } - for (i = 0; i < fw->size / sizeof(*phytable); i++) { - u32 action = le32_to_cpu(phytable[i]); + for (index = 0; index < fw_size; index++) { + u32 action = le32_to_cpu(phytable[index]); + u32 regno = (action & 0x0fff0000) >> 16; - if (!action) + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_READ_EFUSE: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= fw_size) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= fw_size) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } break; - if ((action & 0xf0000000) != PHY_WRITE) { - netif_err(tp, probe, dev, - "unknown action 0x%08x\n", action); + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + netif_err(tp, probe, tp->dev, + "Invalid action 0x%08x\n", action); return; } } - while (i-- != 0) { - u32 action = le32_to_cpu(*phytable); + predata = 0; + count = 0; + + for (index = 0; index < fw_size; ) { + u32 action = le32_to_cpu(phytable[index]); u32 data = action & 0x0000ffff; - u32 reg = (action & 
0x0fff0000) >> 16; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_READ_EFUSE: + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; case PHY_WRITE: - rtl_writephy(tp, reg, data); - phytable++; + rtl_writephy(tp, regno, data); + index++; break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index += 2; + else + index += 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: default: BUG(); } } } +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + release_firmware(tp->fw); + tp->fw = NULL; +} + +static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) +{ + const struct firmware **fw = &tp->fw; + int rc = !*fw; + + if (rc) { + rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); + if (rc < 0) + goto out; + } + + /* TODO: release firmware once rtl_phy_write_fw signals failures. */ + rtl_phy_write_fw(tp, *fw); +out: + return rc; +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -2041,7 +2164,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) { 0x0d, 0xf880 } }; void __iomem *ioaddr = tp->mmio_addr; - const struct firmware *fw; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); @@ -2105,11 +2227,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - if (rtl_readphy(tp, 0x06) == 0xbf00 && - request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { - rtl_phy_write_fw(tp, fw); - release_firmware(fw); - } else { + if ((rtl_readphy(tp, 0x06) != 0xbf00) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); } @@ -2159,7 +2278,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) { 0x0d, 0xf880 } }; void __iomem *ioaddr = tp->mmio_addr; - const struct firmware *fw; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); @@ -2214,11 +2332,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - if (rtl_readphy(tp, 0x06) == 0xb300 && - request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { - rtl_phy_write_fw(tp, fw); - release_firmware(fw); - } else { + if ((rtl_readphy(tp, 0x06) != 0xb300) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); } @@ -3102,6 +3217,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&tp->task); + rtl_release_firmware(tp); + unregister_netdev(dev); if (pci_dev_run_wake(pdev)) diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 711449c6e675..002bac743843 
100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void) int count; int cpu; + if (rss_cpus) + return rss_cpus; + if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { printk(KERN_WARNING "sfc: RSS disabled due to allocation failure\n"); @@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx) efx->legacy_irq = 0; } -struct efx_tx_queue * -efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) -{ - unsigned tx_channel_offset = - separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; - EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || - type >= EFX_TXQ_TYPES); - return &efx->channel[tx_channel_offset + index]->tx_queue[type]; -} - static void efx_set_channels(struct efx_nic *efx) { struct efx_channel *channel; struct efx_tx_queue *tx_queue; - unsigned tx_channel_offset = + + efx->tx_channel_offset = separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; /* Channel pointers were set in efx_init_struct() but we now * need to clear them for TX queues in any RX-only channels. */ efx_for_each_channel(channel, efx) { - if (channel->channel - tx_channel_offset >= + if (channel->channel - efx->tx_channel_offset >= efx->n_tx_channels) { efx_for_each_channel_tx_queue(tx_queue, channel) tx_queue->channel = NULL; diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index bdce66ddf93a..28df8665256a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -735,6 +735,7 @@ struct efx_nic { unsigned next_buffer_table; unsigned n_channels; unsigned n_rx_channels; + unsigned tx_channel_offset; unsigned n_tx_channels; unsigned int rx_buffer_len; unsigned int rx_buffer_order; @@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index) _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ (_efx)->channel[_channel->channel + 1] : NULL) -extern struct efx_tx_queue * -efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) +{ + EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || + type >= EFX_TXQ_TYPES); + return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; +} static inline struct efx_tx_queue * efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 0e6bac5ec65b..7cb301da7474 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c @@ -142,14 +142,6 @@ MODULE_AUTHOR("Tilera"); MODULE_LICENSE("GPL"); - -#define IS_MULTICAST(mac_addr) \ - (((u8 *)(mac_addr))[0] & 0x01) - -#define IS_BROADCAST(mac_addr) \ - (((u16 *)(mac_addr))[0] == 0xffff) - - /* * Queue of incoming packets for a specific cpu and device. * @@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) /* * FIXME: Implement HW multicast filter. */ - if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { + if (is_unicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; filter = compare_ether_addr(mine, buf); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index acbdab3d66ca..dc6cb974f25d 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -2031,7 +2031,7 @@ static void ucc_geth_set_multi(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now. 
*/ - if (!(ha->addr[0] & 1)) + if (!is_multicast_ether_addr(ha->addr)) continue; /* Ask CPM to run CRC and set bit in diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 593c104ab199..d776c4a8d3c1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1021,13 +1021,15 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { pr_debug("invalid frame detected (ignored)" "offset[%u]=%u, length=%u, skb=%p\n", - x, offset, temp, skb); + x, offset, temp, skb_in); if (!x) goto error; break; } else { skb = skb_clone(skb_in, GFP_ATOMIC); + if (!skb) + goto error; skb->len = temp; skb->data = ((u8 *)skb_in->data) + offset; skb_set_tail_pointer(skb, temp); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d143e8b72b5b..cc14b4a75048 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -48,6 +48,9 @@ static atomic_t devices_found; static int enable_mq = 1; static int irq_share_mode; +static void +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); + /* * Enable/Disable the given intr */ @@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) { u32 ret; int i; + unsigned long flags; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + adapter->link_speed = ret >> 16; if (ret & 1) { /* Link is up. */ printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", @@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { + spin_lock(&adapter->cmd_lock); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); + spin_unlock(&adapter->cmd_lock); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, skb_transport_header(skb))->doff * 4; ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; } else { - unsigned int pull_size; - if (skb->ip_summed == CHECKSUM_PARTIAL) { ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); if (ctx->ipv4) { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); - if (iph->protocol == IPPROTO_TCP) { - pull_size = ctx->eth_ip_hdr_size + - sizeof(struct tcphdr); - - if (unlikely(!pskb_may_pull(skb, - pull_size))) { - goto err; - } + if (iph->protocol == IPPROTO_TCP) ctx->l4_hdr_size = ((struct tcphdr *) skb_transport_header(skb))->doff * 4; - } else if (iph->protocol == IPPROTO_UDP) { + else if (iph->protocol == IPPROTO_UDP) + /* + * Use tcp header size so that bytes to + * be copied are more than required by + * the device. + */ ctx->l4_hdr_size = - sizeof(struct udphdr); - } else { + sizeof(struct tcphdr); + else ctx->l4_hdr_size = 0; - } } else { /* for simplicity, don't copy L4 headers */ ctx->l4_hdr_size = 0; @@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct Vmxnet3_DriverShared *shared = adapter->shared; u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; if (grp) { /* add vlan rx stripping. 
*/ if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { int i; - struct Vmxnet3_DSDevRead *devRead = &shared->devRead; adapter->vlan_grp = grp; - /* update FEATURES to device */ - devRead->misc.uptFeatures |= UPT1_F_RXVLAN; - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_FEATURE); /* * Clear entire vfTable; then enable untagged pkts. * Note: setting one entry in vfTable to non-zero turns @@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) vfTable[i] = 0; VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } else { printk(KERN_ERR "%s: vlan_rx_register when device has " "no NETIF_F_HW_VLAN_RX\n", netdev->name); @@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) */ vfTable[i] = 0; } + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_VLAN_FILTERS); - - /* update FEATURES to device */ - devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_FEATURE); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } } } @@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } @@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } @@ -1985,6 +1990,7 @@ static void vmxnet3_set_mc(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + unsigned long flags; struct Vmxnet3_RxFilterConf *rxConf = &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; @@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev) rxConf->mfTablePA = 0; } + spin_lock_irqsave(&adapter->cmd_lock, flags); if (new_mode != rxConf->rxMode) { rxConf->rxMode = cpu_to_le32(new_mode); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, @@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_MAC_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); kfree(new_table); } @@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.uptFeatures |= UPT1_F_LRO; devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); } - if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && - adapter->vlan_grp) { + if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; - } devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); @@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) 
/* rx filter settings */ devRead->rxFilterConf.rxMode = 0; vmxnet3_restore_vlan(adapter); + vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); + /* the rest are already zeroed */ } @@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) { int err, i; u32 ret; + unsigned long flags; dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," " ring sizes %u %u %u\n", adapter->netdev->name, @@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) adapter->shared_pa)); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( adapter->shared_pa)); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); if (ret != 0) { printk(KERN_ERR "Failed to activate dev %s: error %u\n", @@ -2255,7 +2266,10 @@ rq_err: void vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) { + unsigned long flags; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } @@ -2263,12 +2277,15 @@ int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) return 0; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); vmxnet3_disable_all_intrs(adapter); for (i = 0; i < adapter->num_rx_queues; i++) @@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; ring0_size = adapter->rx_queue[0].rx_ring[0].size; ring0_size = (ring0_size + sz - 1) / sz * sz; - ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / + ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / sz * sz); ring1_size = adapter->rx_queue[0].rx_ring[1].size; comp_size = ring0_size + ring1_size; @@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, break; } else { /* If fails to enable required number of MSI-x vectors - * try enabling 3 of them. One each for rx, tx and event + * try enabling minimum number of vectors required. 
*/ vectors = vector_threshold; printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" @@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) u32 cfg; /* intr settings */ + spin_lock(&adapter->cmd_lock); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock(&adapter->cmd_lock); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; @@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) */ if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE - || adapter->num_rx_queues != 2) { + || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; printk(KERN_ERR "Number of rx queues : 1\n"); adapter->num_rx_queues = 1; @@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->netdev = netdev; adapter->pdev = pdev; + spin_lock_init(&adapter->cmd_lock); adapter->shared = pci_alloc_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), &adapter->shared_pa); @@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device) u8 *arpreq; struct in_device *in_dev; struct in_ifaddr *ifa; + unsigned long flags; int i = 0; if (!netif_running(netdev)) return 0; + for (i = 0; i < adapter->num_rx_queues; i++) + napi_disable(&adapter->rx_queue[i].napi); + vmxnet3_disable_all_intrs(adapter); vmxnet3_free_irqs(adapter); vmxnet3_free_intr_resources(adapter); @@ -3188,8 +3212,10 @@ skip_arp: adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( pmConf)); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_PMCFG); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); pci_save_state(pdev); pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), @@ -3204,7 +3230,8 @@ skip_arp: static int vmxnet3_resume(struct device *device) { - int err; + int err, i = 0; + unsigned long flags; struct pci_dev *pdev = to_pci_dev(device); struct net_device *netdev = pci_get_drvdata(pdev); struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device) pci_enable_wake(pdev, PCI_D0, 0); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_PMCFG); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); vmxnet3_alloc_intr_resources(adapter); vmxnet3_request_irqs(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) + napi_enable(&adapter->rx_queue[i].napi); vmxnet3_enable_all_intrs(adapter); return 0; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 8e17fc8a7fe7..81254be85b92 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -45,6 +45,7 @@ static int vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + unsigned long flags; if (adapter->rxcsum != val) { adapter->rxcsum = val; @@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXCSUM; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } } return 0; @@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) static const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[] = { /* 
description, offset */ - { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, - { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, - { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, - { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, - { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, - { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, - { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, - { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, - { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, - { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, + { "Tx Queue#", 0 }, + { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, + { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, + { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, + { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, + { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, + { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, + { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, + { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, + { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, + { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, }; /* per tq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_tq_driver_stats[] = { /* description, offset */ - {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, - drop_total) }, - { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, - drop_too_many_frags) }, - { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, - drop_oversized_hdr) }, - { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, - drop_hdr_inspect_err) }, - { " tso", offsetof(struct vmxnet3_tq_driver_stats, - drop_tso) }, - { "ring full", offsetof(struct vmxnet3_tq_driver_stats, - tx_ring_full) }, - { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, - linearized) }, - { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, - copy_skb_header) }, - { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, - oversized_hdr) }, + {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, + drop_total) }, + { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, + drop_too_many_frags) }, + { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + drop_oversized_hdr) }, + { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, + drop_hdr_inspect_err) }, + { " tso", offsetof(struct vmxnet3_tq_driver_stats, + drop_tso) }, + { " ring full", offsetof(struct vmxnet3_tq_driver_stats, + tx_ring_full) }, + { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, + linearized) }, + { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, + copy_skb_header) }, + { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + oversized_hdr) }, }; /* per rq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[] = { - { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, - { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, - { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, - { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, - { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, - { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) 
}, - { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, - { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, - { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, - { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, + { "Rx Queue#", 0 }, + { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, + { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, + { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, + { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, + { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, + { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, + { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, + { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, + { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, + { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, }; /* per rq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[] = { /* description, offset */ - { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, - drop_total) }, - { " err", offsetof(struct vmxnet3_rq_driver_stats, - drop_err) }, - { " fcs", offsetof(struct vmxnet3_rq_driver_stats, - drop_fcs) }, - { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, - rx_buf_alloc_failure) }, + { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, + drop_total) }, + { " err", offsetof(struct vmxnet3_rq_driver_stats, + drop_err) }, + { " fcs", offsetof(struct vmxnet3_rq_driver_stats, + drop_fcs) }, + { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, + rx_buf_alloc_failure) }, }; /* gloabl stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_global_stats[] = { /* description, offset */ - { "tx timeout count", offsetof(struct vmxnet3_adapter, + { "tx timeout count", offsetof(struct vmxnet3_adapter, tx_timeout_count) } }; @@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev) struct UPT1_TxStats *devTxStats; struct UPT1_RxStats *devRxStats; struct net_device_stats *net_stats = &netdev->stats; + unsigned long flags; int i; adapter = netdev_priv(netdev); /* Collect the dev stats into the shared area */ + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); memset(net_stats, 0, sizeof(*net_stats)); for (i = 0; i < adapter->num_tx_queues; i++) { @@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev) static int vmxnet3_get_sset_count(struct net_device *netdev, int sset) { + struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(vmxnet3_tq_dev_stats) + - ARRAY_SIZE(vmxnet3_tq_driver_stats) + - ARRAY_SIZE(vmxnet3_rq_dev_stats) + - ARRAY_SIZE(vmxnet3_rq_driver_stats) + + return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + + ARRAY_SIZE(vmxnet3_tq_driver_stats)) * + adapter->num_tx_queues + + (ARRAY_SIZE(vmxnet3_rq_dev_stats) + + ARRAY_SIZE(vmxnet3_rq_driver_stats)) * + adapter->num_rx_queues + ARRAY_SIZE(vmxnet3_global_stats); default: return -EOPNOTSUPP; @@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset) } +/* Should be multiple of 4 */ +#define NUM_TX_REGS 8 +#define NUM_RX_REGS 12 + static int vmxnet3_get_regs_len(struct net_device *netdev) { - return 20 * sizeof(u32); + struct 
vmxnet3_adapter *adapter = netdev_priv(netdev); + return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + + adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); } @@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { + struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (stringset == ETH_SS_STATS) { - int i; - - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { - memcpy(buf, vmxnet3_tq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { - memcpy(buf, vmxnet3_tq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; - } - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { - memcpy(buf, vmxnet3_rq_dev_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; + int i, j; + for (j = 0; j < adapter->num_tx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { + memcpy(buf, vmxnet3_tq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); + i++) { + memcpy(buf, vmxnet3_tq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } } - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { - memcpy(buf, vmxnet3_rq_driver_stats[i].desc, - ETH_GSTRING_LEN); - buf += ETH_GSTRING_LEN; + + for (j = 0; j < adapter->num_rx_queues; j++) { + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { + memcpy(buf, vmxnet3_rq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); + i++) { + memcpy(buf, vmxnet3_rq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } } + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { memcpy(buf, vmxnet3_global_stats[i].desc, ETH_GSTRING_LEN); @@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) struct vmxnet3_adapter *adapter = netdev_priv(netdev); u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 
0 : 1; + unsigned long flags; if (data & ~ETH_FLAG_LRO) return -EOPNOTSUPP; @@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_LRO; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } return 0; } @@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + unsigned long flags; u8 *base; int i; int j = 0; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); /* this does assume each counter is 64-bit wide */ -/* TODO change this for multiple queues */ - - base = (u8 *)&adapter->tqd_start[j].stats; - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) - *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); - - base = (u8 *)&adapter->tx_queue[j].stats; - for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) - *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); - - base = (u8 *)&adapter->rqd_start[j].stats; - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) - *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); + for (j = 0; j < adapter->num_tx_queues; j++) { + base = (u8 *)&adapter->tqd_start[j].stats; + *buf++ = (u64)j; + for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) + *buf++ = *(u64 *)(base + + vmxnet3_tq_dev_stats[i].offset); + + base = (u8 *)&adapter->tx_queue[j].stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) + *buf++ = *(u64 *)(base + + vmxnet3_tq_driver_stats[i].offset); + } - base = (u8 *)&adapter->rx_queue[j].stats; - for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) - *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); + for (j = 0; j < adapter->num_rx_queues; j++) { + base = (u8 *)&adapter->rqd_start[j].stats; + *buf++ = (u64) j; + for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) + *buf++ = *(u64 *)(base + + vmxnet3_rq_dev_stats[i].offset); + + base = (u8 *)&adapter->rx_queue[j].stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) + *buf++ = *(u64 *)(base + + vmxnet3_rq_driver_stats[i].offset); + } base = (u8 *)adapter; for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) @@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *buf = p; - int i = 0; + int i = 0, j = 0; memset(p, 0, vmxnet3_get_regs_len(netdev)); @@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) /* Update vmxnet3_get_regs_len if we want to dump more registers */ /* make each ring use multiple of 16 bytes */ -/* TODO change this for multiple queues */ - buf[0] = adapter->tx_queue[i].tx_ring.next2fill; - buf[1] = adapter->tx_queue[i].tx_ring.next2comp; - buf[2] = adapter->tx_queue[i].tx_ring.gen; - buf[3] = 0; - - buf[4] = adapter->tx_queue[i].comp_ring.next2proc; - buf[5] = adapter->tx_queue[i].comp_ring.gen; - buf[6] = adapter->tx_queue[i].stopped; - buf[7] = 0; - - buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; - buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; - buf[10] = adapter->rx_queue[i].rx_ring[0].gen; - buf[11] = 0; - - buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; - buf[13] =
adapter->rx_queue[i].rx_ring[1].next2comp; - buf[14] = adapter->rx_queue[i].rx_ring[1].gen; - buf[15] = 0; - - buf[16] = adapter->rx_queue[i].comp_ring.next2proc; - buf[17] = adapter->rx_queue[i].comp_ring.gen; - buf[18] = 0; - buf[19] = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; + buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; + buf[j++] = adapter->tx_queue[i].tx_ring.gen; + buf[j++] = 0; + + buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; + buf[j++] = adapter->tx_queue[i].comp_ring.gen; + buf[j++] = adapter->tx_queue[i].stopped; + buf[j++] = 0; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; + buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; + buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; + buf[j++] = 0; + + buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; + buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; + buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; + buf[j++] = 0; + + buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; + buf[j++] = adapter->rx_queue[i].comp_ring.gen; + buf[j++] = 0; + buf[j++] = 0; + } + } @@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, const struct ethtool_rxfh_indir *p) { unsigned int i; + unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; @@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev, for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p->ring_index[i]; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RSSIDT); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); return 0; diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 7fadeed37f03..fb5d245ac878 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -68,10 +68,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01001000 +#define VMXNET3_DRIVER_VERSION_NUM 0x01001900 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. 
*/ @@ -289,7 +289,7 @@ struct vmxnet3_rx_queue { #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ VMXNET3_DEVICE_MAX_RX_QUEUES + 1) -#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ +#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */ struct vmxnet3_intr { @@ -317,6 +317,7 @@ struct vmxnet3_adapter { struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; struct vlan_group *vlan_grp; struct vmxnet3_intr intr; + spinlock_t cmd_lock; struct Vmxnet3_DriverShared *shared; struct Vmxnet3_PMConf *pm_conf; struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 1ac9b568f1b0..c81a6512c683 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) "hotplug event.\n"); out: + release_firmware(fw); return ret; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7a7a1b664781..2ac8f6aff5a4 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -831,12 +831,14 @@ tx_drop: return NETDEV_TX_OK; } -static int qeth_l2_open(struct net_device *dev) +static int __qeth_l2_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; int rc = 0; QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; @@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev) return rc; } +static int qeth_l2_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return __qeth_l2_open(dev); +} + static int qeth_l2_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; @@ -1046,7 +1060,7 @@ contin: if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { - qeth_l2_open(card->dev); + __qeth_l2_open(card->dev); } else { rtnl_lock(); dev_open(card->dev); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e227e465bfc4..d09b0c44fc3d 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card, */ if (iph->protocol == IPPROTO_UDP) hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; - hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ | + QETH_HDR_EXT_CSUM_HDR_REQ; + iph->check = 0; if (card->options.performance_stats) card->perf_stats.tx_csum++; } @@ -3240,12 +3242,14 @@ tx_drop: return NETDEV_TX_OK; } -static int qeth_l3_open(struct net_device *dev) +static int __qeth_l3_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; int rc = 0; QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; card->data.state = CH_STATE_UP; @@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev) return rc; } +static int qeth_l3_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return 
__qeth_l3_open(dev); +} + static int qeth_l3_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; @@ -3564,7 +3580,7 @@ contin: netif_carrier_off(card->dev); if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode) - qeth_l3_open(card->dev); + __qeth_l3_open(card->dev); else { rtnl_lock(); dev_open(card->dev); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 38244f59cdd9..ade0568c07a4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll) remove_wait_queue(poll->wqh, &poll->wait); } +static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, + unsigned seq) +{ + int left; + spin_lock_irq(&dev->work_lock); + left = seq - work->done_seq; + spin_unlock_irq(&dev->work_lock); + return left <= 0; +} + static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { unsigned seq; - int left; int flushing; spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; spin_unlock_irq(&dev->work_lock); - wait_event(work->done, ({ - spin_lock_irq(&dev->work_lock); - left = seq - work->done_seq <= 0; - spin_unlock_irq(&dev->work_lock); - left; - })); + wait_event(work->done, vhost_work_seq_done(dev, work, seq)); spin_lock_irq(&dev->work_lock); flushing = --work->flushing; spin_unlock_irq(&dev->work_lock); |
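
Note on the final vhost hunk: it moves the open-coded wait_event() predicate in vhost_work_flush() into a named helper, vhost_work_seq_done(), which takes dev->work_lock around the sequence comparison; the flush still snapshots queue_seq and then sleeps until done_seq has caught up. A minimal user-space sketch of the same flush-by-sequence-number idea follows. It is an analogue only, assuming a pthread mutex/condvar in place of the kernel spinlock and wait queue; the names (struct work, work_flush, work_seq_done, worker) are invented for this sketch and are not kernel or library APIs.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct work {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	unsigned queue_seq;	/* bumped each time the work is queued */
	unsigned done_seq;	/* bumped each time a run of the work completes */
};

/* Caller holds w->lock.  The signed difference copes with wrap-around,
 * like the helper introduced by the patch. */
static int work_seq_done(struct work *w, unsigned seq)
{
	return (int)(seq - w->done_seq) <= 0;
}

/* Flush: remember how much work was queued so far, then sleep until at
 * least that much has completed. */
static void work_flush(struct work *w)
{
	unsigned seq;

	pthread_mutex_lock(&w->lock);
	seq = w->queue_seq;
	while (!work_seq_done(w, seq))
		pthread_cond_wait(&w->done, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

/* Worker side: pretend to run the queued work once, then report it done. */
static void *worker(void *arg)
{
	struct work *w = arg;

	usleep(10000);			/* simulate doing the work */
	pthread_mutex_lock(&w->lock);
	w->done_seq = w->queue_seq;	/* this run covers everything queued so far */
	pthread_cond_broadcast(&w->done);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct work w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	w.queue_seq = 1;		/* one piece of work "queued" */
	pthread_create(&t, NULL, worker, &w);
	work_flush(&w);			/* returns only after the worker has run */
	pthread_join(t, NULL);
	printf("flushed: queue_seq=%u done_seq=%u\n", w.queue_seq, w.done_seq);
	return 0;
}

One difference from the kernel code: in this pthread analogue the wait loop already holds the mutex, so the helper can assume the lock is held, whereas the kernel predicate must take the spinlock itself because wait_event() evaluates it without any lock held.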