Diffstat (limited to 'drivers/net')
100 files changed, 1369 insertions, 882 deletions
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 39214e512452..7ca0eded2561 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data) int csr0, boguscnt; int handled = 0; - if (dev == NULL) { - printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n"); - return IRQ_NONE; - } - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index a699bbf20eb5..3824382faecc 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, /* required last entry */ { 0 } }; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 0c7811faf72c..a179cc6d79f2 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } req = nonemb_cmd->va; sge = nonembedded_sgl(wrb); @@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); +err: spin_unlock_bh(&adapter->mcc_lock); return status; } diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index de40d3b7152f..28a32a6c8bf1 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up) if (adapter->link_up != link_up) { adapter->link_speed = -1; if (link_up) { - netif_start_queue(netdev); netif_carrier_on(netdev); printk(KERN_INFO "%s: Link up\n", netdev->name); } else { - netif_stop_queue(netdev); netif_carrier_off(netdev); printk(KERN_INFO "%s: Link down\n", netdev->name); } @@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev) netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, BE_NAPI_WEIGHT); - - netif_stop_queue(netdev); } static void be_unmap_pci_bars(struct be_adapter *adapter) diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 8e4183717d91..8849699c66c4 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -22,8 +22,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.62.00-4" -#define DRV_MODULE_RELDATE "2011/01/18" +#define DRV_MODULE_VERSION "1.62.00-6" +#define DRV_MODULE_RELDATE "2011/01/30" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE @@ -1211,6 +1211,7 @@ struct bnx2x { /* DCBX Negotation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; + u32 pending_max; }; /** @@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 - -/* CMNG constants - derived from lab experiments, and not from system spec calculations !!! 
*/ -#define DEF_MIN_RATE 100 -/* resolution of the rate shaping timer - 100 usec */ -#define RS_PERIODIC_TIMEOUT_USEC 100 -/* resolution of fairness algorithm in usecs - - coefficient for calculating the actual t fair */ -#define T_FAIR_COEF 10000000 +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 /* number of bytes in single QM arbitration cycle - - coefficient for calculating the fairness timer */ -#define QM_ARB_BYTES 40000 -#define FAIR_MEM 2 + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* Memory of fairness algorithm . 2 cycles */ +#define FAIR_MEM 2 #define ATTN_NIG_FOR_FUNC (1L << 8) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 710ce5d04c53..a71b32940533 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, #endif } +/* Timestamp option length allowed for TPA aggregation: + * + * nop nop kind length echo val + */ +#define TPA_TSTAMP_OPT_LEN 12 +/** + * Calculate the approximate value of the MSS for this + * aggregation using the first packet of it. + * + * @param bp + * @param parsing_flags Parsing flags from the START CQE + * @param len_on_bd Total length of the first packet for the + * aggregation. + */ +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, + u16 len_on_bd) +{ + /* TPA arrgregation won't have an IP options and TCP options + * other than timestamp. + */ + u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); + + + /* Check if there was a TCP timestamp, if there is it's will + * always be 12 bytes length: nop nop kind length echo val. + * + * Otherwise FW would close the aggregation. 
+ */ + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) + hdrs_len += TPA_TSTAMP_OPT_LEN; + + return len_on_bd - hdrs_len; +} + static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct sk_buff *skb, struct eth_fast_path_rx_cqe *fp_cqe, - u16 cqe_idx) + u16 cqe_idx, u16 parsing_flags) { struct sw_rx_page *rx_pg, old_rx_pg; u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); @@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* This is needed in order to enable forwarding support */ if (frag_size) - skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, - max(frag_size, (u32)len_on_bd)); + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, + len_on_bd); #ifdef BNX2X_STOP_ON_ERROR if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { @@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (likely(new_skb)) { /* fix ip xsum and give it to the stack */ /* (no need to map the new skb) */ + u16 parsing_flags = + le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); prefetch(skb); prefetch(((char *)(skb)) + L1_CACHE_BYTES); @@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } if (!bnx2x_fill_frag_skb(bp, fp, skb, - &cqe->fast_path_cqe, cqe_idx)) { - if ((le16_to_cpu(cqe->fast_path_cqe. - pars_flags.flags) & PARSING_FLAGS_VLAN)) + &cqe->fast_path_cqe, cqe_idx, + parsing_flags)) { + if (parsing_flags & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, le16_to_cpu(cqe->fast_path_cqe. vlan_tag)); @@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) { u16 line_speed = bp->link_vars.line_speed; if (IS_MF(bp)) { - u16 maxCfg = (bp->mf_config[BP_VN(bp)] & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - /* Calculate the current MAX line speed limit for the DCC - * capable devices + u16 maxCfg = bnx2x_extract_max_cfg(bp, + bp->mf_config[BP_VN(bp)]); + + /* Calculate the current MAX line speed limit for the MF + * devices */ - if (IS_MF_SD(bp)) { + if (IS_MF_SI(bp)) + line_speed = (line_speed * maxCfg) / 100; + else { /* SD mode */ u16 vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) line_speed = vn_max_rate; - } else /* IS_MF_SI(bp)) */ - line_speed = (line_speed * maxCfg) / 100; + } } return line_speed; @@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp) bnx2x_free_rx_skbs(bp); } +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) +{ + /* load old values */ + u32 mf_cfg = bp->mf_config[BP_VN(bp)]; + + if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { + /* leave all but MAX value */ + mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; + + /* set new MAX value */ + mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) + & FUNC_MF_CFG_MAX_BW_MASK; + + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); + } +} + static void bnx2x_free_msix_irqs(struct bnx2x *bp) { int i, offset = 1; @@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_set_eth_mac(bp, 1); + if (bp->pending_max) { + bnx2x_update_max_mf_config(bp, bp->pending_max); + bp->pending_max = 0; + } + if (bp->port.pmf) bnx2x_initial_phy_init(bp, load_mode); diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 03eb4d68e6bb..85ea7f26b51f 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp); */ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); +/** + * Updates MAX part of MF configuration in HW + * (if 
required) + * + * @param bp + * @param value + */ +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); + /* dev_close main block */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); @@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, void bnx2x_acquire_phy_lock(struct bnx2x *bp); void bnx2x_release_phy_lock(struct bnx2x *bp); +/** + * Extracts MAX BW part from MF configuration. + * + * @param bp + * @param mf_cfg + * + * @return u16 + */ +static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) +{ + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + BNX2X_ERR("Illegal configuration detected for Max BW - " + "using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; +} + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 5b44a8b48509..7e92f9d0dcfd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed |= (cmd->speed_hi << 16); if (IS_MF_SI(bp)) { - u32 param = 0; + u32 part; u32 line_speed = bp->link_vars.line_speed; /* use 10G if no link detected */ @@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) REQ_BC_VER_4_SET_MF_BW); return -EINVAL; } - if (line_speed < speed) { - BNX2X_DEV_INFO("New speed should be less or equal " - "to actual line speed\n"); + + part = (speed * 100) / line_speed; + + if (line_speed < speed || !part) { + BNX2X_DEV_INFO("Speed setting should be in a range " + "from 1%% to 100%% " + "of actual line speed\n"); return -EINVAL; } - /* load old values */ - param = bp->mf_config[BP_VN(bp)]; - - /* leave only MIN value */ - param &= FUNC_MF_CFG_MIN_BW_MASK; - /* set new MAX value */ - param |= (((speed * 100) / line_speed) - << FUNC_MF_CFG_MAX_BW_SHIFT) - & FUNC_MF_CFG_MAX_BW_MASK; + if (bp->state != BNX2X_STATE_OPEN) + /* store value for following "load" */ + bp->pending_max = part; + else + bnx2x_update_max_mf_config(bp, part); - bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); return 0; } @@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) { 0x100, 0x350 }, /* manuf_info */ { 0x450, 0xf0 }, /* feature_info */ { 0x640, 0x64 }, /* upgrade_key_info */ - { 0x6a4, 0x64 }, { 0x708, 0x70 }, /* manuf_key_info */ - { 0x778, 0x70 }, { 0, 0 } }; __be32 buf[0x350 / 4]; @@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev, buf[4] = 1; etest->flags |= ETH_TEST_FL_FAILED; } - if (bp->port.pmf) - if (bnx2x_link_test(bp, is_serdes) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + + if (bnx2x_link_test(bp, is_serdes) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } #ifdef BNX2X_EXTRA_DEBUG bnx2x_panic_dump(bp); diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index 5a268e9a0895..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h @@ -241,7 +241,7 @@ static const struct { /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't * want to handle "system kill" flow at the moment. 
*/ - BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 7160ec51093e..dd1210fddfff 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -3948,48 +3948,6 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, return rc; } -static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - u16 val; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); - - if (val == 0) { - /* Mustn't set low power mode in 8073 A0 */ - return; - } - - /* Disable PLL sequencer (use read-modify-write to clear bit 13) */ - bnx2x_cl45_read(bp, phy, - MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); - val &= ~(1<<13); - bnx2x_cl45_write(bp, phy, - MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); - - /* PLL controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490); - - /* Tx Controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640); - - /* Rx Controls */ - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015); - - /* Enable PLL sequencer (use read-modify-write to set bit 13) */ - bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); - val |= (1<<13); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); -} - /******************************************************************/ /* BCM8073 PHY SECTION */ /******************************************************************/ @@ -4148,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_8073_set_pause_cl37(params, phy, vars); - bnx2x_8073_set_xaui_low_power_mode(bp, phy); - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); @@ -6519,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x80); + + /* Tell LED3 to blink on source */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (1<<6); /* A83B[8:6]= 1 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); } break; } @@ -7720,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; u16 val; - s8 port; + s8 port = 0; s8 port_of_path = 0; - - bnx2x_ext_phy_hw_reset(bp, 0); + u32 swap_val, swap_override; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port ^= (swap_val && swap_override); + bnx2x_ext_phy_hw_reset(bp, port); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 8cdcf5b39d1e..aa032339e321 
100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) vn_max_rate = 0; } else { + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); + vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If min rate is zero - set it to 1 */ + /* If fairness is enabled (not all min rates are zeroes) and + if current min rate is zero - set it to 1. + This is a requirement of the algorithm. */ if (bp->vn_weight_sum && (vn_min_rate == 0)) vn_min_rate = DEF_MIN_RATE; - vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; + + if (IS_MF_SI(bp)) + /* maxCfg in percents of linkspeed */ + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; + else + /* maxCfg is absolute in 100Mb units */ + vn_max_rate = maxCfg * 100; } DP(NETIF_MSG_IFUP, @@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) m_fair_vn.vn_credit_delta = max_t(u32, (vn_min_rate * (T_FAIR_COEF / (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold * 2)); + (bp->cmng.fair_vars.fair_threshold + + MIN_ABOVE_THRESH)); DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", m_fair_vn.vn_credit_delta); } @@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) bnx2x_calc_vn_weight_sum(bp); /* calculate and set min-max rate for each vn */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, vn); + if (bp->port.pmf) + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, vn); /* always enable rate shaping and fairness */ bp->cmng.flags.cmng_enables |= @@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - /* indicate link status only if link status actually changed */ - if (prev_link_status != bp->link_vars.link_status) - bnx2x_link_report(bp); - - if (IS_MF(bp)) - bnx2x_link_sync_notify(bp); - if (bp->link_vars.link_up && bp->link_vars.line_speed) { int cmng_fns = bnx2x_get_cmng_fns_mode(bp); @@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "single function mode without fairness\n"); } + + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + + /* indicate link status only if link status actually changed */ + if (prev_link_status != bp->link_vars.link_status) + bnx2x_link_report(bp); } void bnx2x__link_status_update(struct bnx2x *bp) @@ -2301,15 +2312,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) /* accept matched ucast */ drop_all_ucast = 0; } - if (filters & BNX2X_ACCEPT_MULTICAST) { + if (filters & BNX2X_ACCEPT_MULTICAST) /* accept matched mcast */ drop_all_mcast = 0; - if (IS_MF_SI(bp)) - /* since mcast addresses won't arrive with ovlan, - * fw needs to accept all of them in - * switch-independent mode */ - accp_all_mcast = 1; - } + if (filters & BNX2X_ACCEPT_ALL_UNICAST) { /* accept all mcast */ drop_all_ucast = 0; @@ -4281,9 +4287,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | BNX2X_ACCEPT_MULTICAST; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, + BNX2X_ACCEPT_UNICAST | + BNX2X_ACCEPT_MULTICAST); + } #endif break; @@ -4291,18 +4300,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 
def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | BNX2X_ACCEPT_ALL_MULTICAST; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); + /* + * Prevent duplication of multicast packets by configuring FCoE + * L2 Client to receive only matched unicast frames. + */ + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, + BNX2X_ACCEPT_UNICAST); + } #endif break; case BNX2X_RX_MODE_PROMISC: def_q_filters |= BNX2X_PROMISCUOUS_MODE; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); + /* + * Prevent packets duplication by configuring DROP_ALL for FCoE + * L2 Client. + */ + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); + } #endif /* pass management unicast packets as well */ llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; @@ -5296,10 +5316,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) } } - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); - bnx2x_setup_fan_failure_detection(bp); /* clear PXP2 attentions */ @@ -5503,9 +5519,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, MCP_BLOCK, init_stage); bnx2x_init_block(bp, DMAE_BLOCK, init_stage); - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, bp->common.shmem2_base, port)) { u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : @@ -8379,6 +8392,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) bp->mdio.prtad = XGXS_EXT_PHY_ADDR(ext_phy_config); + + /* + * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) + * In MF mode, it is set to cover self test cases + */ + if (IS_MF(bp)) + bp->port.need_hw_lock = 1; + else + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); } static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index bda60d590fa8..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) if (unlikely(bp->panic)) return; + bnx2x_stats_stm[bp->stats_state][event].action(bp); + /* Protect a state change flow */ spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); - if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", state, event, bp->stats_state); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 1024ae158227..a5d5d0b5b155 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port) } /** - * __get_rx_machine_lock - lock the port's RX machine + * __get_state_machine_lock - lock the port's state machines * @port: the port we're looking at * */ -static inline void __get_rx_machine_lock(struct port *port) +static inline void 
__get_state_machine_lock(struct port *port) { - spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** - * __release_rx_machine_lock - unlock the port's RX machine + * __release_state_machine_lock - unlock the port's state machines * @port: the port we're looking at * */ -static inline void __release_rx_machine_lock(struct port *port) +static inline void __release_state_machine_lock(struct port *port) { - spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** @@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port) } /** - * __initialize_port_locks - initialize a port's RX machine spinlock + * __initialize_port_locks - initialize a port's STATE machine spinlock * @port: the port we're looking at * */ static inline void __initialize_port_locks(struct port *port) { // make sure it isn't called twice - spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } //conversions @@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) { rx_states_t last_state; - // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback) - __get_rx_machine_lock(port); - // keep current State Machine state to compare later if it was changed last_state = port->sm_rx_state; @@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) pr_err("%s: An illegal loopback occurred on adapter (%s).\n" "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", port->slave->dev->master->name, port->slave->dev->name); - __release_rx_machine_lock(port); return; } __update_selected(lacpdu, port); @@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) break; } } - __release_rx_machine_lock(port); } /** @@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) goto re_arm; } + /* Lock around state machines to protect data accessed + * by all (e.g., port->sm_vars). ad_rx_machine may run + * concurrently due to incoming LACPDU. + */ + __get_state_machine_lock(port); + ad_rx_machine(NULL, port); ad_periodic_machine(port); ad_port_selection_logic(port); @@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) // turn off the BEGIN bit, since we already handled it if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; + + __release_state_machine_lock(port); } re_arm: @@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u case AD_TYPE_LACPDU: pr_debug("Received LACPDU on port %d\n", port->actor_port_number); + /* Protect against concurrent state machines */ + __get_state_machine_lock(port); ad_rx_machine(lacpdu, port); + __release_state_machine_lock(port); break; case AD_TYPE_MARKER: diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 2c46a154f2c6..b28baff70864 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -264,7 +264,8 @@ struct ad_bond_info { struct ad_slave_info { struct aggregator aggregator; // 802.3ad aggregator structure struct port port; // 802.3ad port structure - spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt + spinlock_t state_machine_lock; /* mutex state machines vs. 
+ incoming LACPDU */ u16 id; }; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 986195eaa57c..5dec456fd4a4 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -23,7 +23,7 @@ config CAN_SLCAN As only the sending and receiving of CAN frames is implemented, this driver should work with the (serial/USB) CAN hardware from: - www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de + www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de Userspace tools to attach the SLCAN line discipline (slcan_attach, slcand) can be found in the can-utils at the SocketCAN SVN, see diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 2532b9631538..57d2ffbbb433 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1109,7 +1109,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev, return ret; } -static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); static struct attribute *at91_sysfs_attrs[] = { diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index b9a6d7a5a739..366f5cc050ae 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev, return count; } -static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, +static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, ican3_sysfs_set_term); static struct attribute *ican3_sysfs_attrs[] = { diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) goto open_unlock; } - priv->wq = create_freezeable_workqueue("mcp251x_wq"); + priv->wq = create_freezable_workqueue("mcp251x_wq"); INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 27d1d398e25e..d38706958af6 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig @@ -1,5 +1,5 @@ config CAN_MSCAN - depends on CAN_DEV && (PPC || M68K || M68KNOMMU) + depends on CAN_DEV && (PPC || M68K) tristate "Support for Freescale MSCAN based chips" ---help--- The Motorola Scalable Controller Area Network (MSCAN) definition diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c42e97268248..e54712b22c27 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -185,7 +185,7 @@ struct pch_can_priv { static struct can_bittiming_const pch_can_bittiming_const = { .name = KBUILD_MODNAME, - .tseg1_min = 1, + .tseg1_min = 2, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, @@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev) struct pch_can_priv *priv = netdev_priv(ndev); unregister_candev(priv->ndev); - pci_iounmap(pdev, priv->regs); if (priv->use_msi) pci_disable_msi(priv->dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); pch_can_reset(priv); + pci_iounmap(pdev, priv->regs); free_candev(priv->ndev); } @@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev, priv->use_msi = 0; } else { netdev_err(ndev, "PCH CAN opened with MSI\n"); + pci_set_master(pdev); priv->use_msi = 1; } diff --git a/drivers/net/can/softing/Kconfig 
b/drivers/net/can/softing/Kconfig index 92bd6bdde5e3..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -1,6 +1,6 @@ config CAN_SOFTING tristate "Softing Gmbh CAN generic support" - depends on CAN_DEV + depends on CAN_DEV && HAS_IOMEM ---help--- Support for CAN cards from Softing Gmbh & some cards from Vector Gmbh. @@ -18,7 +18,7 @@ config CAN_SOFTING config CAN_SOFTING_CS tristate "Softing Gmbh CAN pcmcia cards" depends on PCMCIA - select CAN_SOFTING + depends on CAN_SOFTING ---help--- Support for PCMCIA cards from Softing Gmbh & some cards from Vector Gmbh. diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 300fe75dd1a7..c11bb4de8630 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 5157e15e96eb..aeea9f9ff6e8 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = { }; static const struct can_bittiming_const softing_btr_const = { + .name = "softing", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 7ff170cbc7dc..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; int kcqe_cnt; + /* status block index must be read before reading other fields */ + rmb(); cp->kwq_con_idx = *cp->kwq_con_idx_ptr; while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { @@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) barrier(); if (status_idx != *cp->kcq1.status_idx_ptr) { status_idx = (u16) *cp->kcq1.status_idx_ptr; + /* status block index must be read first */ + rmb(); cp->kwq_con_idx = *cp->kwq_con_idx_ptr; } else break; @@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) u32 last_status = *info->status_idx_ptr; int kcqe_cnt; + /* status block index must be read before reading the KCQ */ + rmb(); while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { service_kcqes(dev, kcqe_cnt); @@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) break; last_status = *info->status_idx_ptr; + /* status block index must be read before reading the KCQ */ + rmb(); } return last_status; } @@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) { struct cnic_dev *dev = (struct cnic_dev *) data; struct cnic_local *cp = dev->cnic_priv; - u32 status_idx; + u32 status_idx, new_status_idx; if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) return; - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); + while (1) { + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); - CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); + CNIC_WR16(dev, cp->kcq1.io_addr, + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); - if (BNX2X_CHIP_IS_E2(cp->chip_id)) { - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); + if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, + status_idx, IGU_INT_ENABLE, 1); + break; + } + + new_status_idx = cnic_service_bnx2x_kcq(dev, 
&cp->kcq2); + + if (new_status_idx != status_idx) + continue; CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + MAX_KCQ_IDX); cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, status_idx, IGU_INT_ENABLE, 1); - } else { - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, - status_idx, IGU_INT_ENABLE, 1); + + break; } } diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h index a550d0c706f3..eb71b8250b91 100644 --- a/drivers/net/cxgb4/t4_msg.h +++ b/drivers/net/cxgb4/t4_msg.h @@ -123,6 +123,7 @@ enum { ULP_MODE_NONE = 0, ULP_MODE_ISCSI = 2, ULP_MODE_RDMA = 4, + ULP_MODE_TCPDDP = 5, ULP_MODE_FCOE = 6, }; diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) { int i; - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Debugfs support is best effort. @@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) */ static void cleanup_debugfs(struct adapter *adapter) { - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Unlike our sister routine cleanup_proc(), we don't need to remove @@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, struct net_device *netdev; /* - * Vet our module parameters. - */ - if (msi != MSI_MSIX && msi != MSI_MSI) { - dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" - " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, - MSI_MSI); - err = -EINVAL; - goto err_out; - } - - /* * Print our driver banner the first time we're called to initialize a * device. */ @@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, /* * Set up our debugfs entries. */ - if (cxgb4vf_debugfs_root) { + if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), cxgb4vf_debugfs_root); - if (adapter->debugfs_root == NULL) + if (IS_ERR_OR_NULL(adapter->debugfs_root)) dev_warn(&pdev->dev, "could not create debugfs" " directory"); else @@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, */ err_free_debugfs: - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2802,7 +2791,6 @@ err_release_regions: err_disable_device: pci_disable_device(pdev); -err_out: return err; } @@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) /* * Tear down our debugfs entries. */ - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) } /* + * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt + * delivery. + */ +static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) +{ + struct adapter *adapter; + int pidx; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + /* + * Disable all Virtual Interfaces. This will shut down the + * delivery of all ingress packets into the chip for these + * Virtual Interfaces. 
+ */ + for_each_port(adapter, pidx) { + struct net_device *netdev; + struct port_info *pi; + + if (!test_bit(pidx, &adapter->registered_device_map)) + continue; + + netdev = adapter->port[pidx]; + if (!netdev) + continue; + + pi = netdev_priv(netdev); + t4vf_enable_vi(adapter, pi->viid, false, false); + } + + /* + * Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. + */ + t4vf_free_sge_resources(adapter); +} + +/* * PCI Device registration data structures. */ #define CH_DEVICE(devid, idx) \ @@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { .id_table = cxgb4vf_pci_tbl, .probe = cxgb4vf_pci_probe, .remove = __devexit_p(cxgb4vf_pci_remove), + .shutdown = __devexit_p(cxgb4vf_pci_shutdown), }; /* @@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) { int ret; + /* + * Vet our module parameters. + */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + printk(KERN_WARNING KBUILD_MODNAME + ": bad module parameter msi=%d; must be %d" + " (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); + return -EINVAL; + } + /* Debugfs support is optional, just warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cxgb4vf_debugfs_root) + if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) printk(KERN_WARNING KBUILD_MODNAME ": could not create" " debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); - if (ret < 0) + if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) debugfs_remove(cxgb4vf_debugfs_root); return ret; } diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, delay_idx = 0; ms = delay[0]; - for (i = 0; i < 500; i += ms) { + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { if (sleep_ok) { ms = delay[delay_idx]; if (delay_idx < ARRAY_SIZE(delay) - 1) diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) int ret; /* free and bail if we are shutting down */ - if (unlikely(!netif_running(ndev))) { + if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { dev_kfree_skb_any(skb); return; } diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 1b48b68ad4fd..8b0084d17c8c 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev) } } /* Change buffer ownership for this last frame, back to the adapter */ - for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { + for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); } writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); @@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev) /* ** Update entry information */ - lp->rx_new = (++lp->rx_new) & lp->rxRingMask; + lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; } return 0; @@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev) } /* Update all the pointers */ - lp->tx_old = (++lp->tx_old) & lp->txRingMask; + lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; } return 0; diff --git a/drivers/net/dl2k.c 
b/drivers/net/dl2k.c index e1a8216ff692..c05db6046050 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev) /* Free all the skbuffs in the queue. */ for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].status = 0; - np->rx_ring[i].fraginfo = 0; skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, @@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev) dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; } + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Checksum mode */ dm9000_set_rx_csum_unlocked(dev, db->rx_csum); - /* GPIO0 on pre-activate PHY */ - iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ - iow(db, DM9000_GPR, 0); /* Enable PHY */ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; @@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) unsigned long flags; /* Save previous register address */ - reg_save = readb(db->io_addr); spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); netif_stop_queue(dev); dm9000_reset(db); @@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) return -EAGAIN; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ + iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ + mdelay(1); /* delay needs by DM9000B */ + /* Initialize DM9000 board */ dm9000_reset(db); dm9000_init_dm9000(dev); diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(bp->dev, bp->mii_bus); - if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out_free_mdio_irq; @@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) bp = netdev_priv(dev); bp->dev = dev; + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); spin_lock_init(&bp->lock); diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index aed223b1b897..7501d977d992 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) case M88E1000_I_PHY_ID: case M88E1011_I_PHY_ID: case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: hw->phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: @@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) break; case e1000_ce4100: if ((hw->phy_id == RTL8211B_PHY_ID) || - (hw->phy_id == RTL8201N_PHY_ID)) + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) match = true; break; case e1000_82541: diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 196eeda2dd6c..c70b23d52284 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -2917,6 +2917,7 @@ struct e1000_host_command_info { #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID #define M88E1011_I_REV_4 0x04 #define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 #define L1LXT971A_PHY_ID 0x001378E0 #define 
RTL8211B_PHY_ID 0x001CC910 diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 55c1711f1688..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h @@ -42,7 +42,8 @@ #define GBE_CONFIG_RAM_BASE \ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) -#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ (iowrite16_rep(base + offset, data, count)) diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 1c18f26b0812..6d513a383340 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) u16 phy_status, phy_1000t_status, phy_ext_status; u16 pci_status; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, downshift_task); + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); } @@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) return 0; } +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + void e1000e_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); @@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000_get_phy_info(&adapter->hw); } @@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) static void e1000_update_phy_info(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + schedule_work(&adapter->update_phy_task); } @@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) u32 link, tctl; int tx_pending = 0; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { /* Cancel scheduled suspend requests. */ @@ -4309,7 +4344,6 @@ link_up: * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
*/ - adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return; @@ -4338,19 +4372,12 @@ link_up: else ew32(ICS, E1000_ICS_RXDMT0); + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; - /* flush partial descriptors to memory before detecting Tx hang */ - if (adapter->flags2 & FLAG2_DMA_BURST) { - ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); - ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); - /* - * no need to flush the writes because the timeout code does - * an er32 first thing - */ - } - /* * With 82571 controllers, LAA may be overwritten due to controller * reset from the other port. Set the appropriate LAA in RAR[0] @@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && (adapter->flags & FLAG_RX_RESTART_NOW))) { e1000e_dump(adapter); @@ -5307,7 +5338,7 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) __e1000e_disable_aspm(pdev, state); } -#ifdef CONFIG_PM_OPS +#ifdef CONFIG_PM static bool e1000e_pm_ready(struct e1000_adapter *adapter) { return !!adapter->tx_ring->buffer_info; @@ -5458,7 +5489,7 @@ static int e1000_runtime_resume(struct device *dev) return __e1000_resume(pdev); } #endif /* CONFIG_PM_RUNTIME */ -#endif /* CONFIG_PM_OPS */ +#endif /* CONFIG_PM */ static void e1000_shutdown(struct pci_dev *pdev) { @@ -5936,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = er32(WUC); eeprom_apme_mask = E1000_WUC_APME; - if (eeprom_data & E1000_WUC_PHY_WAKE) + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && @@ -6164,7 +6196,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); -#ifdef CONFIG_PM_OPS +#ifdef CONFIG_PM static const struct dev_pm_ops e1000_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) SET_RUNTIME_PM_OPS(e1000_runtime_suspend, @@ -6178,7 +6210,7 @@ static struct pci_driver e1000_driver = { .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = __devexit_p(e1000_remove), -#ifdef CONFIG_PM_OPS +#ifdef CONFIG_PM .driver.pm = &e1000_pm_ops, #endif .shutdown = e1000_shutdown, diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 112c5aa9af7f..907b05a1c659 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) if (netif_msg_hw(priv)) printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", endptr + 1); - enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); + enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); } static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index b79d7e1555d5..db0290f05bdf 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev) # define ethoc_resume NULL #endif -#ifdef CONFIG_OF static 
struct of_device_id ethoc_match[] = { - { - .compatible = "opencores,ethoc", - }, + { .compatible = "opencores,ethoc", }, {}, }; MODULE_DEVICE_TABLE(of, ethoc_match); -#endif static struct platform_driver ethoc_driver = { .probe = ethoc_probe, @@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = { .driver = { .name = "ethoc", .owner = THIS_MODULE, -#ifdef CONFIG_OF .of_match_table = ethoc_match, -#endif }, }; diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2a71373719ae..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, - } + }, + { } }; static unsigned char macaddr[ETH_ALEN]; diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } + netif_carrier_off(dev); + dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c @@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * The parameter rar_count will usually be hw->mac.rar_entry_count * unless there are workarounds that change this. **/ -void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, u32 rar_used_count, u32 rar_count) { diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index d5ede2df3e42..ebbda7d15254 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } hw->addr_ctrl.overflow_promisc = 0; diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 6342d4859790..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; - static const unsigned int bufflen = 4096; + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; unsigned int firstoff = 0; unsigned int lastsize; unsigned int thisoff = 0; unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw; - dma_addr_t addr; + dma_addr_t addr = 0; if (!netdev || !sgl) return 0; @@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough user buffers. 
We need an extra " + "buffer because lastsize is bufflen.\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); @@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) e_err(drv, "failed to allocated FCoE DDP pool\n"); spin_lock_init(&fcoe->lock); + + /* Extra buffer to be shared by all DDPs for HW work around */ + fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (fcoe->extra_ddp_buffer == NULL) { + e_err(drv, "failed to allocated extra DDP buffer\n"); + goto out_extra_ddp_buffer_alloc; + } + + fcoe->extra_ddp_buffer_dma = + dma_map_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + goto out_extra_ddp_buffer_dma; + } } /* Enable L2 eth type filter for FCoE */ @@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } } #endif + + return; + +out_extra_ddp_buffer_dma: + kfree(fcoe->extra_ddp_buffer); +out_extra_ddp_buffer_alloc: + pci_pool_destroy(fcoe->pool); + fcoe->pool = NULL; } /** @@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) if (fcoe->pool) { for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); pci_pool_destroy(fcoe->pool); fcoe->pool = NULL; } diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -70,6 +70,8 @@ struct ixgbe_fcoe { spinlock_t lock; struct pci_pool *pool; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + unsigned char *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; }; #endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 602078b84892..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "3.0.12-k2" +#define DRV_VERSION "3.2.9-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; @@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) u32 mhadd, hlreg0; /* Decide whether to use packet split mode or not */ + /* On by default */ + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + /* Do not use packet split if we're in SR-IOV Mode */ - if (!adapter->num_vfs) - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + if (adapter->num_vfs) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { @@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) * We need to try and force an autonegotiation * session, then bring up link. 
*/ - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) schedule_work(&adapter->multispeed_fiber_task); } else { @@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { int q_idx, num_q_vectors; struct ixgbe_q_vector *q_vector; - int napi_vectors; int (*poll)(struct napi_struct *, int); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - napi_vectors = adapter->num_rx_queues; poll = &ixgbe_clean_rxtx_many; } else { num_q_vectors = 1; - napi_vectors = 1; poll = &ixgbe_poll; } @@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) unregister_netdev(adapter->netdev); return; } - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) /* This will also work for DA Twinax connections */ diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 47b15738b009..187b3a16ec1f 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } - static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); vmolr |= (IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_ROPE | IXGBE_VMOLR_BAM); if (aupe) vmolr |= IXGBE_VMOLR_AUPE; diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 3a8923993ce3..f2518b01067d 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) } ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); IXGBE_WRITE_FLUSH(hw); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST)) + if (!(ctrl & reset_bit)) break; } - if (ctrl & IXGBE_CTRL_RST) { + if (ctrl & reset_bit) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(bp->dev, bp->mii_bus); + dev_set_drvdata(&bp->dev->dev, bp->mii_bus); if (mdiobus_register(bp->mii_bus)) goto err_out_free_mdio_irq; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5933621ac3ff..fc27a9926d9e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, vnet_hdr_len = q->vnet_hdr_sz; err = -EINVAL; - if ((len -= vnet_hdr_len) < 0) + if (len < vnet_hdr_len) goto err; + len -= vnet_hdr_len; err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, sizeof(vnet_hdr)); diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 4ffdc18fcb8a..2765a3ce9c24 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE 
PCIe gen2*/ { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ + { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ + { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ + { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ + { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ + { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ + { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ + { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ + { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ + { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ + { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ + { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ + { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ { 0, } }; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 2541321bad82..9fb59d3f9c92 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np) { struct niu_parent *parent = np->parent; int first_rx_channel, first_tx_channel; + int num_rx_rings, num_tx_rings; + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; int i, port, err; port = np->port; @@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np) first_tx_channel += parent->txchan_per_port[i]; } - np->num_rx_rings = parent->rxchan_per_port[port]; - np->num_tx_rings = parent->txchan_per_port[port]; + num_rx_rings = parent->rxchan_per_port[port]; + num_tx_rings = parent->txchan_per_port[port]; - netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); - netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); - - np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info), - GFP_KERNEL); + rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), + GFP_KERNEL); err = -ENOMEM; - if (!np->rx_rings) + if (!rx_rings) goto out_err; + np->num_rx_rings = num_rx_rings; + smp_wmb(); + np->rx_rings = rx_rings; + + netif_set_real_num_rx_queues(np->dev, num_rx_rings); + for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; @@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np) return err; } - np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), - GFP_KERNEL); + tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), + GFP_KERNEL); err = -ENOMEM; - if (!np->tx_rings) + if (!tx_rings) goto out_err; + np->num_tx_rings = num_tx_rings; + smp_wmb(); + np->tx_rings = tx_rings; + + netif_set_real_num_tx_queues(np->dev, num_tx_rings); + for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; @@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np) static void niu_get_rx_stats(struct niu *np) { unsigned long pkts, dropped, errors, bytes; + struct rx_ring_info *rx_rings; int i; pkts = dropped = errors = bytes = 0; + + rx_rings = ACCESS_ONCE(np->rx_rings); + if (!rx_rings) + goto no_rings; + for (i = 0; i < np->num_rx_rings; i++) { - struct rx_ring_info *rp = &np->rx_rings[i]; + struct rx_ring_info *rp = &rx_rings[i]; niu_sync_rx_discard_stats(np, rp, 0); @@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np) dropped += rp->rx_dropped; 
errors += rp->rx_errors; } + +no_rings: np->dev->stats.rx_packets = pkts; np->dev->stats.rx_bytes = bytes; np->dev->stats.rx_dropped = dropped; @@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np) static void niu_get_tx_stats(struct niu *np) { unsigned long pkts, errors, bytes; + struct tx_ring_info *tx_rings; int i; pkts = errors = bytes = 0; + + tx_rings = ACCESS_ONCE(np->tx_rings); + if (!tx_rings) + goto no_rings; + for (i = 0; i < np->num_tx_rings; i++) { - struct tx_ring_info *rp = &np->tx_rings[i]; + struct tx_ring_info *rp = &tx_rings[i]; pkts += rp->tx_packets; bytes += rp->tx_bytes; errors += rp->tx_errors; } + +no_rings: np->dev->stats.tx_packets = pkts; np->dev->stats.tx_bytes = bytes; np->dev->stats.tx_errors = errors; @@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev) { struct niu *np = netdev_priv(dev); - niu_get_rx_stats(np); - niu_get_tx_stats(np); - + if (netif_running(dev)) { + niu_get_rx_stats(np); + niu_get_tx_stats(np); + } return &dev->stats; } diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -73,7 +73,7 @@ struct pch_gbe_regs { struct pch_gbe_regs_mac_adr mac_adr[16]; u32 ADDR_MASK; u32 MIIM; - u32 reserve2; + u32 MAC_ADDR_LOAD; u32 RGMII_ST; u32 RGMII_CTRL; u32 reserve3[3]; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 1bf12339441b..8c66e22c3a0a 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_SHORT_PKT 64 #define DSC_INIT16 0xC000 #define PCH_GBE_DMA_ALIGN 0 +#define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 @@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); + +inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +{ + iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); +} + /** * pch_gbe_mac_read_mac_addr - Read MAC address * @hw: Pointer to the HW structure @@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work) struct pch_gbe_adapter *adapter; adapter = container_of(work, struct pch_gbe_adapter, reset_task); + rtnl_lock(); pch_gbe_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work) */ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - if (netif_running(netdev)) { - pch_gbe_down(adapter); - pch_gbe_up(adapter); - } - rtnl_unlock(); + pch_gbe_down(adapter); + pch_gbe_up(adapter); } /** @@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info; struct pch_gbe_rx_desc *rx_desc; u32 length; - unsigned char tmp_packet[ETH_HLEN]; unsigned int i; unsigned int cleaned_count = 0; bool cleaned = false; - struct sk_buff *skb; + struct sk_buff *skb, *new_skb; u8 dma_status; u16 gbec_status; u32 tcp_ip_status; - u8 skb_copy_flag = 0; - u8 skb_padding_flag = 0; i = rx_ring->next_to_clean; @@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, pr_err("Receive CRC Error\n"); } else 
{ /* get receive length */ - /* length convert[-3], padding[-2] */ - length = (rx_desc->rx_words_eob) - 3 - 2; + /* length convert[-3] */ + length = (rx_desc->rx_words_eob) - 3; /* Decide the data conversion method */ if (!adapter->rx_csum) { /* [Header:14][payload] */ - skb_padding_flag = 0; - skb_copy_flag = 1; + if (NET_IP_ALIGN) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; + } + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(new_skb->data, skb->data, + length); + skb = new_skb; + } else { + /* DMA buffer is used as SKB as it is.*/ + buffer_info->skb = NULL; + } } else { /* [Header:14][padding:2][payload] */ - skb_padding_flag = 1; - if (length < copybreak) - skb_copy_flag = 1; - else - skb_copy_flag = 0; - } - - /* Data conversion */ - if (skb_copy_flag) { /* recycle skb */ - struct sk_buff *new_skb; - new_skb = - netdev_alloc_skb(netdev, - length + NET_IP_ALIGN); - if (new_skb) { - if (!skb_padding_flag) { - skb_reserve(new_skb, - NET_IP_ALIGN); + /* The length includes padding length */ + length = length - PCH_GBE_DMA_PADDING; + if ((length < copybreak) || + (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb. + * Padding data is deleted + * at the time of a copy.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; } + skb_reserve(new_skb, NET_IP_ALIGN); memcpy(new_skb->data, skb->data, - length); - /* save the skb - * in buffer_info as good */ + ETH_HLEN); + memcpy(&new_skb->data[ETH_HLEN], + &skb->data[ETH_HLEN + + PCH_GBE_DMA_PADDING], + length - ETH_HLEN); skb = new_skb; - } else if (!skb_padding_flag) { - /* dorrop error */ - pr_err("New skb allocation Error\n"); - goto dorrop; + } else { + /* Padding data is deleted + * by moving header data.*/ + memmove(&skb->data[PCH_GBE_DMA_PADDING], + &skb->data[0], ETH_HLEN); + skb_reserve(skb, NET_IP_ALIGN); + buffer_info->skb = NULL; } - } else { - buffer_info->skb = NULL; - } - if (skb_padding_flag) { - memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); - memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], - ETH_HLEN); - skb_reserve(skb, NET_IP_ALIGN); - } - + /* The length includes FCS length */ + length = length - ETH_FCS_LEN; /* update status of driver */ adapter->stats.rx_bytes += length; adapter->stats.rx_packets++; @@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; pch_gbe_set_ethtool_ops(netdev); + pch_gbe_mac_load_mac_addr(&adapter->hw); pch_gbe_mac_reset_hw(&adapter->hw); /* setup the private structure */ @@ -2430,7 +2446,7 @@ static struct pci_driver pch_gbe_pcidev = { .id_table = pch_gbe_pcidev_id, .probe = pch_gbe_probe, .remove = pch_gbe_remove, -#ifdef CONFIG_PM_OPS +#ifdef CONFIG_PM .driver.pm = &pch_gbe_pm_ops, #endif .shutdown = pch_gbe_shutdown, diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 1f42f6ac8551..d3cb77205863 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev) /* * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 
- * Early datasheets said to poll the reset bit, but now they say that - * it "is not a reliable indicator and subsequently should be ignored." - * We wait at least 10ms. + * We wait at least 2ms. */ - mdelay(10); + mdelay(2); /* * Reset RBCR[01] back to zero as per magic incantation. diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), PCMCIA_DEVICE_NULL, }; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 27e6f6d43cac..e3ebd90ae651 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -49,8 +49,8 @@ #include <asm/processor.h> #define DRV_NAME "r6040" -#define DRV_VERSION "0.26" -#define DRV_RELDATE "30May2010" +#define DRV_VERSION "0.27" +#define DRV_RELDATE "23Feb2011" /* PHY CHIP Address */ #define PHY1_ADDR 1 /* For MAC1 */ @@ -69,6 +69,8 @@ /* MAC registers */ #define MCR0 0x00 /* Control register 0 */ +#define MCR0_PROMISC 0x0020 /* Promiscuous mode */ +#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ #define MCR1 0x04 /* Control register 1 */ #define MAC_RST 0x0001 /* Reset the MAC */ #define MBCR 0x08 /* Bus control */ @@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; - u16 reg; unsigned long flags; struct netdev_hw_addr *ha; int i; + u16 *adrp; + u16 hash_table[4] = { 0 }; + + spin_lock_irqsave(&lp->lock, flags); - /* MAC Address */ + /* Keep our MAC Address */ adrp = (u16 *)dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); - /* Promiscous Mode */ - spin_lock_irqsave(&lp->lock, flags); - /* Clear AMCP & PROM bits */ - reg = ioread16(ioaddr) & ~0x0120; - if (dev->flags & IFF_PROMISC) { - reg |= 0x0020; - lp->mcr0 |= 0x0020; - } - /* Too many multicast addresses - * accept all traffic */ - else if ((netdev_mc_count(dev) > MCAST_MAX) || - (dev->flags & IFF_ALLMULTI)) - reg |= 0x0020; + lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN); - iowrite16(reg, ioaddr); - spin_unlock_irqrestore(&lp->lock, flags); + /* Promiscuous mode */ + if (dev->flags & IFF_PROMISC) + lp->mcr0 |= MCR0_PROMISC; - /* Build the hash table */ - if (netdev_mc_count(dev) > MCAST_MAX) { - u16 hash_table[4]; - u32 crc; + /* Enable multicast hash table function to + * receive all multicast packets. */ + else if (dev->flags & IFF_ALLMULTI) { + lp->mcr0 |= MCR0_HASH_EN; - for (i = 0; i < 4; i++) - hash_table[i] = 0; + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } + for (i = 0; i < 4; i++) + hash_table[i] = 0xffff; + } + /* Use internal multicast address registers if the number of + * multicast addresses is not greater than MCAST_MAX. 
*/ + else if (netdev_mc_count(dev) <= MCAST_MAX) { + i = 0; netdev_for_each_mc_addr(ha, dev) { - char *addrs = ha->addr; + u16 *adrp = (u16 *) ha->addr; + iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); + iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); + iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); + i++; + } + while (i < MCAST_MAX) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + i++; + } + } + /* Otherwise, Enable multicast hash table function. */ + else { + u32 crc; - if (!(*addrs & 1)) - continue; + lp->mcr0 |= MCR0_HASH_EN; + + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } - crc = ether_crc_le(6, addrs); + /* Build multicast hash table */ + netdev_for_each_mc_addr(ha, dev) { + u8 *addrs = ha->addr; + + crc = ether_crc(ETH_ALEN, addrs); crc >>= 26; - hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + hash_table[crc >> 4] |= 1 << (crc & 0xf); } - /* Fill the MAC hash tables with their values */ + } + + iowrite16(lp->mcr0, ioaddr + MCR0); + + /* Fill the MAC hash tables with their values */ + if (lp->mcr0 && MCR0_HASH_EN) { iowrite16(hash_table[0], ioaddr + MAR0); iowrite16(hash_table[1], ioaddr + MAR1); iowrite16(hash_table[2], ioaddr + MAR2); iowrite16(hash_table[3], ioaddr + MAR3); } - /* Multicast Address 1~4 case */ - i = 0; - netdev_for_each_mc_addr(ha, dev) { - if (i >= MCAST_MAX) - break; - adrp = (u16 *) ha->addr; - iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); - iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); - iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); - i++; - } - while (i < MCAST_MAX) { - iowrite16(0xffff, ioaddr + MID_1L + 8 * i); - iowrite16(0xffff, ioaddr + MID_1M + 8 * i); - iowrite16(0xffff, ioaddr + MID_1H + 8 * i); - i++; - } + + spin_unlock_irqrestore(&lp->lock, flags); } static void netdev_get_drvinfo(struct net_device *dev, diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bde7d61f1930..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -25,6 +25,7 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> +#include <linux/pci-aspm.h> #include <asm/system.h> #include <asm/io.h> @@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) } } -static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) { + void __iomem *ioaddr = tp->mmio_addr; int i; RTL_W8(ERIDR, cmd); @@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) break; } - ocp_write(ioaddr, 0x1, 0x30, 0x00000001); + ocp_write(tp, 0x1, 0x30, 0x00000001); } #define OOB_CMD_RESET 0x00 @@ -973,7 +975,8 @@ static void __rtl8169_check_link_status(struct net_device *dev, if (pm) pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); - netif_info(tp, ifup, dev, "link up\n"); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); @@ -2867,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || (tp->mac_version == RTL_GIGA_MAC_VER_24)) && 
@@ -2890,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; } @@ -2899,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; } @@ -3008,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pci_enable_device(pdev); if (rc < 0) { @@ -3041,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = PCIMulRW | RxChkSum; + tp->cp_cmd = RxChkSum; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { @@ -3189,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); + netif_carrier_off(dev); + out: return rc; @@ -3315,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) /* Disable interrupts */ rtl8169_irq_mask_and_ack(ioaddr); - if (tp->mac_version == RTL_GIGA_MAC_VER_28) { + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28) { while (RTL_R8(TxPoll) & NPQ) udelay(20); @@ -3757,7 +3778,8 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -3843,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev) Cxpl_dbg_sel | \ ASF | \ PktCntrDisable | \ - PCIDAC | \ - PCIMulRW) + Mac_dbgo_sel) static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) { @@ -3874,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) RTL_W8(Config1, cfg1 & ~LEDS0); - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); - rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); } @@ -3887,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); } static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) @@ -3914,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev) } } + RTL_W8(Cfg9346, Cfg9346_Unlock); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: rtl_hw_start_8102e_1(ioaddr, pdev); @@ -3928,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev) break; } - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Cfg9346, Cfg9346_Lock); RTL_W8(MaxTxPacketSize, TxPacketMax); rtl_set_rx_max_size(ioaddr, rx_buf_sz); - tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; - + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; RTL_W16(CPlusCmd, tp->cp_cmd); RTL_W16(IntrMitigate, 0x0000); @@ -3945,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); rtl_set_rx_tx_config_registers(tp); - RTL_W8(Cfg9346, Cfg9346_Lock); - RTL_R8(IntrMask); rtl_set_rx_mode(dev); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); RTL_W16(IntrMask, tp->intr_event); @@ -4639,12 +4653,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) break; } - /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - break; + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_26: + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + goto done; + /* Testers needed. */ + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + /* Experimental science. Pktgen proof. */ + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_25: + if (status == RxFIFOOver) + goto done; + break; + default: + break; + } } if (unlikely(status & SYSErr)) { @@ -4680,7 +4715,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) (status & RxFIFOOver) ? 
(status | RxOverflow) : status); status = RTL_R16(IntrStatus); } - +done: return IRQ_RETVAL(handled); } diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_self_tests efx_tests; + struct efx_self_tests *efx_tests; int already_up; - int rc; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { @@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); - goto fail2; + goto fail1; } } - memset(&efx_tests, 0, sizeof(efx_tests)); - - rc = efx_selftest(efx, &efx_tests, test->flags); + rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); @@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - fail2: - fail1: +fail1: /* Fill ethtool results structures */ - efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 5976d1d51df1..640e368ebeee 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev) "cur_rx:%4.4d, dirty_rx:%4.4d\n", net_dev->name, sis_priv->cur_rx, sis_priv->dirty_rx); + dev_kfree_skb(skb); break; } diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - /* device is off until link detection */ - netif_carrier_off(dev); - return dev; } diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 64bfdae5956f..d70bde95460b 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev) smsc911x_reg_write(pdata, HW_CFG, 0x00050000); smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ timeout = 50; while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) priv->hw = device; - if (device_can_wakeup(priv->device)) + if (device_can_wakeup(priv->device)) { priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + enable_irq_wake(dev->irq); + } return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c 
+++ b/drivers/net/tg3.c @@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 04e8ce14a1d0..7113168473cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1,7 +1,7 @@ /* * cdc_ncm.c * - * Copyright (C) ST-Ericsson 2010 + * Copyright (C) ST-Ericsson 2010-2011 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> * @@ -54,7 +54,7 @@ #include <linux/usb/usbnet.h> #include <linux/usb/cdc.h> -#define DRIVER_VERSION "17-Jan-2011" +#define DRIVER_VERSION "7-Feb-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 @@ -77,6 +77,9 @@ */ #define CDC_NCM_DPT_DATAGRAMS_MAX 32 +/* Maximum amount of IN datagrams in NTB */ +#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */ + /* Restart the timer, if amount of datagrams is less than given value */ #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 @@ -85,11 +88,6 @@ (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) -struct connection_speed_change { - __le32 USBitRate; /* holds 3GPP downlink value, bits per second */ - __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */ -} __attribute__ ((packed)); - struct cdc_ncm_data { struct usb_cdc_ncm_nth16 nth16; struct usb_cdc_ncm_ndp16 ndp16; @@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) { struct usb_cdc_notification req; u32 val; - __le16 max_datagram_size; u8 flags; u8 iface_no; int err; + u16 ntb_fmt_supported; iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; @@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); + /* devices prior to NCM Errata shall set this field to zero */ + ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); + ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); if (ctx->func_desc != NULL) flags = ctx->func_desc->bmNetworkCapabilities; @@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " - "wNdpOutAlignment=%u flags=0x%x\n", + "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, - ctx->tx_ndp_modulus, flags); + ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); - /* max count of tx datagrams without terminating NULL entry */ - ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; + /* max count of tx datagrams */ + if ((ctx->tx_max_datagrams == 
0) || + (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) + ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; /* verify maximum size of received NTB in bytes */ - if ((ctx->rx_max < - (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || - (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { + if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { + pr_debug("Using min receive length=%d\n", + USB_CDC_NCM_NTB_MIN_IN_SIZE); + ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; + } + + if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { pr_debug("Using default maximum receive length=%d\n", CDC_NCM_NTB_MAX_SIZE_RX); ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; } + /* inform device about NTB input size changes */ + if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + + if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { + struct usb_cdc_ncm_ndp_input_size ndp_in_sz; + + req.wLength = 8; + ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + ndp_in_sz.wNtbInMaxDatagrams = + cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); + ndp_in_sz.wReserved = 0; + err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, + 1000); + } else { + __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + + req.wLength = 4; + err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, + NULL, 1000); + } + + if (err) + pr_debug("Setting NTB Input Size failed\n"); + } + /* verify maximum size of transmitted NTB in bytes */ if ((ctx->tx_max < (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || @@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) /* additional configuration */ /* set CRC Mode */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_CRC_MODE; - req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; - - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) - pr_debug("Setting CRC mode off failed\n"); + if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_CRC_MODE; + req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); + req.wIndex = cpu_to_le16(iface_no); + req.wLength = 0; + + err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); + if (err) + pr_debug("Setting CRC mode off failed\n"); + } - /* set NTB format */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_NTB_FORMAT; - req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; + /* set NTB format, if both formats are supported */ + if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_NTB_FORMAT; + req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); + req.wIndex = cpu_to_le16(iface_no); + req.wLength = 0; + + err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); + if (err) + pr_debug("Setting NTB format to 16-bit failed\n"); + } - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) - pr_debug("Setting NTB format to 16-bit failed\n"); + ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; /* set Max Datagram Size (MTU) */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; - 
req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); - req.wLength = cpu_to_le16(2); + if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { + __le16 max_datagram_size; + u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); + + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + req.wLength = cpu_to_le16(2); + + err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, + 1000); + if (err) { + pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", + CDC_NCM_MIN_DATAGRAM_SIZE); + } else { + ctx->max_datagram_size = le16_to_cpu(max_datagram_size); + /* Check Eth descriptor value */ + if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { + if (ctx->max_datagram_size > eth_max_sz) + ctx->max_datagram_size = eth_max_sz; + } else { + if (ctx->max_datagram_size > + CDC_NCM_MAX_DATAGRAM_SIZE) + ctx->max_datagram_size = + CDC_NCM_MAX_DATAGRAM_SIZE; + } - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); - if (err) { - pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", - CDC_NCM_MIN_DATAGRAM_SIZE); - /* use default */ - ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; - } else { - ctx->max_datagram_size = le16_to_cpu(max_datagram_size); + if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) + ctx->max_datagram_size = + CDC_NCM_MIN_DATAGRAM_SIZE; + + /* if value changed, update device */ + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + req.wLength = 2; + max_datagram_size = cpu_to_le16(ctx->max_datagram_size); + + err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, + 0, NULL, 1000); + if (err) + pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); + } - if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; - else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; } if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) @@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) ctx->ether_desc = (const struct usb_cdc_ether_desc *)buf; - dev->hard_mtu = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); - if (dev->hard_mtu < - (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) - dev->hard_mtu = - CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; - - else if (dev->hard_mtu > - (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN)) - dev->hard_mtu = - CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN; + if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE) + dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE; + else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE) + dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE; break; case USB_CDC_NCM_TYPE: @@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) u32 offset; u32 last_offset; u16 n = 0; - u8 timeout = 0; + u8 ready2send = 0; /* if there is a remaining skb, it gets priority */ if (skb != NULL) swap(skb, ctx->tx_rem_skb); else - timeout = 1; + ready2send = 1; /* * +----------------+ @@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) for (; n < ctx->tx_max_datagrams; n++) { /* check if end of transmit buffer is reached */ - if (offset >= ctx->tx_max) + if (offset >= ctx->tx_max) { + ready2send = 1; break; - + } /* compute maximum buffer size */ 
rem = ctx->tx_max - offset; @@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) } ctx->tx_rem_skb = skb; skb = NULL; - - /* loop one more time */ - timeout = 1; + ready2send = 1; } break; } @@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) ctx->tx_curr_last_offset = last_offset; goto exit_no_skb; - } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { + } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { /* wait for more frames */ /* push variables */ ctx->tx_curr_skb = skb_out; @@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); - ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), + ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus); memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); @@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * sizeof(struct usb_cdc_ncm_dpe16)); ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); - ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ + ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, + memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, &(ctx->tx_ncm.ndp16), sizeof(ctx->tx_ncm.ndp16)); - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + + memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + sizeof(ctx->tx_ncm.ndp16), &(ctx->tx_ncm.dpe16), (ctx->tx_curr_frame_num + 1) * @@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) goto error; } - temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); + temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex); if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { pr_debug("invalid DPT16 index\n"); goto error; @@ -1048,10 +1115,10 @@ error: static void cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, - struct connection_speed_change *data) + struct usb_cdc_speed_change *data) { - uint32_t rx_speed = le32_to_cpu(data->USBitRate); - uint32_t tx_speed = le32_to_cpu(data->DSBitRate); + uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); + uint32_t tx_speed = le32_to_cpu(data->ULBitRate); /* * Currently the USB-NET API does not support reporting the actual @@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) /* test for split data in 8-byte chunks */ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { cdc_ncm_speed_change(ctx, - (struct connection_speed_change *)urb->transfer_buffer); + (struct usb_cdc_speed_change *)urb->transfer_buffer); return; } @@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) break; case USB_CDC_NOTIFY_SPEED_CHANGE: - if (urb->actual_length < - (sizeof(*event) + sizeof(struct connection_speed_change))) + if (urb->actual_length < (sizeof(*event) + + sizeof(struct usb_cdc_speed_change))) set_bit(EVENT_STS_SPLIT, &dev->flags); else cdc_ncm_speed_change(ctx, - (struct connection_speed_change *) &event[1]); + (struct usb_cdc_speed_change *) &event[1]); break; default: diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -651,6 +651,10 @@ static const 
struct usb_device_id products[] = { .driver_info = (unsigned long)&dm9601_info, }, { + USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ .driver_info = (unsigned long)&dm9601_info, }, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2628,15 +2628,15 @@ exit: static void hso_free_tiomget(struct hso_serial *serial) { - struct hso_tiocmget *tiocmget = serial->tiocmget; + struct hso_tiocmget *tiocmget; + if (!serial) + return; + tiocmget = serial->tiocmget; if (tiocmget) { - if (tiocmget->urb) { - usb_free_urb(tiocmget->urb); - tiocmget->urb = NULL; - } + usb_free_urb(tiocmget->urb); + tiocmget->urb = NULL; serial->tiocmget = NULL; kfree(tiocmget); - } } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -931,8 +931,10 @@ fail_halt: if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); - if (status < 0) + if (status < 0) { + usb_free_urb(urb); goto fail_lowmem; + } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 90a23e410d1b..82dba5aaf423 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq) } } +static void virtnet_napi_enable(struct virtnet_info *vi) +{ + napi_enable(&vi->napi); + + /* If all buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets + * now. virtnet_poll wants re-enable the queue, so we disable here. + * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { + virtqueue_disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } +} + static void refill_work(struct work_struct *work) { struct virtnet_info *vi; @@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work) vi = container_of(work, struct virtnet_info, refill.work); napi_disable(&vi->napi); still_empty = !try_fill_recv(vi, GFP_KERNEL); - napi_enable(&vi->napi); + virtnet_napi_enable(vi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ @@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - napi_enable(&vi->napi); - - /* If all buffers were filled by other side before we napi_enabled, we - * won't get another interrupt, so process any outstanding packets - * now. virtnet_poll wants re-enable the queue, so we disable here. 
- * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (napi_schedule_prep(&vi->napi)) { - virtqueue_disable_cb(vi->rvq); - __napi_schedule(&vi->napi); - } + virtnet_napi_enable(vi); return 0; } diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 01c05f53e2f9..228d4f7a58af 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, if (status != VXGE_HW_OK) goto exit; - if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && (rts_table != VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) *data1 = 0; diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 0064be7ce5c9..21091c26a9a5 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah) for (i = 0; i < qmax; i++) { err = ath5k_hw_stop_tx_dma(ah, i); /* -EINVAL -> queue inactive */ - if (err != -EINVAL) + if (err && err != -EINVAL) return err; } - return err; + return 0; } diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index e5f2b96a4c63..a702817daf72 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, if (!ah->ah_bwmode) { dur = ieee80211_generic_frame_duration(sc->hw, NULL, len, rate); - return dur; + return le16_to_cpu(dur); } bitrate = rate->bitrate; @@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) * what rate we should choose to TX ACKs. */ tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); - tx_time = le16_to_cpu(tx_time); - ath5k_hw_reg_write(ah, tx_time, reg); if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) return 0; } +/* + * Wait for synth to settle + */ +static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + /* + * On 5211+ read activation -> rx delay + * and use it (100ns steps). + */ + if (ah->ah_version != AR5K_AR5210) { + u32 delay; + delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & + AR5K_PHY_RX_DELAY_M; + delay = (channel->hw_value & CHANNEL_CCK) ? + ((delay << 2) / 22) : (delay / 10); + if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) + delay = delay << 1; + if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) + delay = delay << 2; + /* XXX: /2 on turbo ? 
Let's be safe + * for now */ + udelay(100 + delay); + } else { + mdelay(1); + } +} + /**********************\ * RF Gain optimization * @@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, case AR5K_RF5111: ret = ath5k_hw_rf5111_channel(ah, channel); break; + case AR5K_RF2317: case AR5K_RF2425: ret = ath5k_hw_rf2425_channel(ah, channel); break; @@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, /* Failed */ if (i >= 100) return -EIO; + + /* Set channel and wait for synth */ + ret = ath5k_hw_channel(ah, channel); + if (ret) + return ret; + + ath5k_hw_wait_for_synth(ah, channel); } /* @@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; + /* Write OFDM timings on 5212*/ + if (ah->ah_version == AR5K_AR5212 && + channel->hw_value & CHANNEL_OFDM) { + + ret = ath5k_hw_write_ofdm_timings(ah, channel); + if (ret) + return ret; + + /* Spur info is available only from EEPROM versions + * greater than 5.3, but the EEPROM routines will use + * static values for older versions */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424) + ath5k_hw_set_spur_mitigation_filter(ah, + channel); + } + + /* If we used fast channel switching + * we are done, release RF bus and + * fire up NF calibration. + * + * Note: Only NF calibration due to + * channel change, not AGC calibration + * since AGC is still running ! + */ + if (fast) { + /* + * Release RF Bus grant + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, + AR5K_PHY_RFBUS_REQ_REQUEST); + + /* + * Start NF calibration + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_NF); + + return ret; + } + /* * For 5210 we do all initialization using * initvals, so we don't have to modify * any settings (5210 also only supports * a/aturbo modes) */ - if ((ah->ah_version != AR5K_AR5210) && !fast) { + if (ah->ah_version != AR5K_AR5210) { /* * Write initial RF gain settings @@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; - /* Write OFDM timings on 5212*/ - if (ah->ah_version == AR5K_AR5212 && - channel->hw_value & CHANNEL_OFDM) { - - ret = ath5k_hw_write_ofdm_timings(ah, channel); - if (ret) - return ret; - - /* Spur info is available only from EEPROM versions - * greater than 5.3, but the EEPROM routines will use - * static values for older versions */ - if (ah->ah_mac_srev >= AR5K_SREV_AR5424) - ath5k_hw_set_spur_mitigation_filter(ah, - channel); - } - /*Enable/disable 802.11b mode on 5111 (enable 2111 frequency converter + CCK)*/ if (ah->ah_radio == AR5K_RF5111) { @@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); + ath5k_hw_wait_for_synth(ah, channel); + /* - * On 5211+ read activation -> rx delay - * and use it. + * Perform ADC test to see if baseband is ready + * Set tx hold and check adc test register */ - if (ah->ah_version != AR5K_AR5210) { - u32 delay; - delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & - AR5K_PHY_RX_DELAY_M; - delay = (channel->hw_value & CHANNEL_CCK) ? - ((delay << 2) / 22) : (delay / 10); - if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) - delay = delay << 1; - if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) - delay = delay << 2; - /* XXX: /2 on turbo ? 
Let's be safe - * for now */ - udelay(100 + delay); - } else { - mdelay(1); - } - - if (fast) - /* - * Release RF Bus grant - */ - AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, - AR5K_PHY_RFBUS_REQ_REQUEST); - else { - /* - * Perform ADC test to see if baseband is ready - * Set tx hold and check adc test register - */ - phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); - ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); - for (i = 0; i <= 20; i++) { - if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) - break; - udelay(200); - } - ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); + phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); + ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); + for (i = 0; i <= 20; i++) { + if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) + break; + udelay(200); } + ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); /* * Start automatic gain control calibration diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index f8a7771faee2..f44c84ab5dce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, } /* WAR for ASPM system hang */ - if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { + if (AR_SREV_9285(ah) || AR_SREV_9287(ah)) val |= (AR_WA_BIT6 | AR_WA_BIT7); - } if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 3681caf54282..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -21,7 +21,6 @@ #include <linux/device.h> #include <linux/leds.h> #include <linux/completion.h> -#include <linux/pm_qos_params.h> #include "debug.h" #include "common.h" @@ -57,8 +56,6 @@ struct ath_node; #define A_MAX(a, b) ((a) > (b) ? 
(a) : (b)) -#define ATH9K_PM_QOS_DEFAULT_VALUE 55 - #define TSF_TO_TU(_h,_l) \ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) @@ -218,6 +215,7 @@ struct ath_frame_info { struct ath_buf_state { u8 bf_type; u8 bfs_paprd; + unsigned long bfs_paprd_timestamp; enum ath9k_internal_frame_type bfs_ftype; }; @@ -593,7 +591,6 @@ struct ath_softc { struct work_struct paprd_work; struct work_struct hw_check_work; struct completion paprd_complete; - bool paprd_pending; u32 intrstatus; u32 sc_flags; /* SC_OP_* */ @@ -633,8 +630,6 @@ struct ath_softc { struct ath_descdma txsdma; struct ath_ant_comb ant_comb; - - struct pm_qos_request_list pm_qos_req; }; struct ath_wiphy { @@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) extern struct ieee80211_ops ath9k_ops; extern int ath9k_modparam_nohwcrypt; extern int led_blink; -extern int ath9k_pm_qos_value; extern bool is_ath9k_unloaded; irqreturn_t ath_isr(int irq, void *dev); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) struct tx_buf *tx_buf = NULL; struct sk_buff *nskb = NULL; int ret = 0, i; - u16 *hdr, tx_skb_cnt = 0; + u16 tx_skb_cnt = 0; u8 *buf; + __le16 *hdr; if (hif_dev->tx.tx_skb_cnt == 0) return 0; @@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) buf = tx_buf->buf; buf += tx_buf->offset; - hdr = (u16 *)buf; - *hdr++ = nskb->len; - *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; + hdr = (__le16 *)buf; + *hdr++ = cpu_to_le16(nskb->len); + *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); buf += 4; memcpy(buf, nskb->data, nskb->len); tx_buf->len = nskb->len + 4; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 38433f9bfe59..0352f0994caa 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv) { ath9k_htc_exit_debug(priv->ah); ath9k_hw_deinit(priv->ah); - tasklet_kill(&priv->swba_tasklet); - tasklet_kill(&priv->rx_tasklet); - tasklet_kill(&priv->tx_tasklet); kfree(priv->ah); priv->ah = NULL; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f4d576bc3ccd..6bb59958f71e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1025,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) int ret = 0; u8 cmd_rsp; - /* Cancel all the running timers/work .. */ - cancel_work_sync(&priv->fatal_work); - cancel_work_sync(&priv->ps_work); - cancel_delayed_work_sync(&priv->ath9k_led_blink_work); - ath9k_led_stop_brightness(priv); - mutex_lock(&priv->mutex); if (priv->op_flags & OP_INVALID) { @@ -1044,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); + + tasklet_kill(&priv->swba_tasklet); + tasklet_kill(&priv->rx_tasklet); + tasklet_kill(&priv->tx_tasklet); + skb_queue_purge(&priv->tx_queue); + mutex_unlock(&priv->mutex); + + /* Cancel all the running timers/work .. 
*/ + cancel_work_sync(&priv->fatal_work); + cancel_work_sync(&priv->ps_work); + cancel_delayed_work_sync(&priv->ath9k_led_blink_work); + ath9k_led_stop_brightness(priv); + + mutex_lock(&priv->mutex); + /* Remove monitor interface here */ if (ah->opmode == NL80211_IFTYPE_MONITOR) { if (ath9k_htc_remove_monitor_interface(priv)) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 767d8b86f1e1..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); -int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; -module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); -MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); - bool is_ath9k_unloaded; /* We use the hw_value as an index into our private channel structure */ @@ -598,8 +594,6 @@ err_btcoex: err_queues: ath9k_hw_deinit(ah); err_hw: - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); kfree(ah); sc->sc_ah = NULL; @@ -764,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, ath_init_leds(sc); ath_start_rfkill_poll(sc); - pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - return 0; error_world: @@ -807,9 +798,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) ath9k_hw_deinit(sc->sc_ah); - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); - kfree(sc->sc_ah); sc->sc_ah = NULL; } @@ -824,6 +812,8 @@ void ath9k_deinit_device(struct ath_softc *sc) wiphy_rfkill_stop_polling(sc->hw->wiphy); ath_deinit_leds(sc); + ath9k_ps_restore(sc); + for (i = 0; i < sc->num_sec_wiphy; i++) { struct ath_wiphy *aphy = sc->sec_wiphy[i]; if (aphy == NULL) @@ -834,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) } ieee80211_unregister_hw(hw); - pm_qos_remove_request(&sc->pm_qos_req); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) struct ath_common *common = ath9k_hw_common(ah); if (!(ints & ATH9K_INT_GLOBAL)) - ath9k_hw_enable_interrupts(ah); + ath9k_hw_disable_interrupts(ah); ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); @@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); } - ath9k_hw_enable_interrupts(ah); + if (ints & ATH9K_INT_GLOBAL) + ath9k_hw_enable_interrupts(ah); return; } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c79c97be6cd4..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int { struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); struct ath_tx_control txctl; int time_left; @@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int tx_info->control.rates[1].idx = -1; 
init_completion(&sc->paprd_complete); - sc->paprd_pending = true; txctl.paprd = BIT(chain); - if (ath_tx_start(hw, skb, &txctl) != 0) + + if (ath_tx_start(hw, skb, &txctl) != 0) { + ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n"); + dev_kfree_skb_any(skb); return false; + } time_left = wait_for_completion_timeout(&sc->paprd_complete, msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); - sc->paprd_pending = false; if (!time_left) ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, @@ -953,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); ath9k_ps_restore(sc); - - ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); } int ath_reset(struct ath_softc *sc, bool retry_tx) @@ -1171,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) ath9k_btcoex_timer_resume(sc); } - /* User has the option to provide pm-qos value as a module - * parameter rather than using the default value of - * 'ATH9K_PM_QOS_DEFAULT_VALUE'. - */ - pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); - if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) common->bus_ops->extn_synch_en(common); @@ -1309,6 +1305,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_lock_bh(&sc->sc_pcu_lock); + /* prevent tasklets to enable interrupts once we disable them */ + ah->imask &= ~ATH9K_INT_GLOBAL; + /* make sure h/w will not generate any interrupt * before setting the invalid flag. */ ath9k_hw_disable_interrupts(ah); @@ -1326,6 +1325,12 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); + /* we can now sync irq and kill any running tasklets, since we already + * disabled interrupts and not holding a spin lock */ + synchronize_irq(sc->irq); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + ath9k_ps_restore(sc); sc->ps_idle = true; @@ -1334,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) sc->sc_flags |= SC_OP_INVALID; - pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); - mutex_unlock(&sc->mutex); ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 33a37edbaf79..07b7804aec5b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, bf->bf_state.bfs_paprd); + if (txctl->paprd) + bf->bf_state.bfs_paprd_timestamp = jiffies; + ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); } @@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, bf->bf_buf_addr = 0; if (bf->bf_state.bfs_paprd) { - if (!sc->paprd_pending) + if (time_after(jiffies, + bf->bf_state.bfs_paprd_timestamp + + msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) dev_kfree_skb_any(skb); else complete(&sc->paprd_complete); diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 939a0e96ed1f..84866a4b8350 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); /* 2. Maybe the AP wants to send multicast/broadcast data? */ - cam = !!(tim_ie->bitmap_ctrl & 0x01); + cam |= !!(tim_ie->bitmap_ctrl & 0x01); if (!cam) { /* back to low-power land. 
*/ diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { { USB_DEVICE(0x057c, 0x8402) }, /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ { USB_DEVICE(0x1668, 0x1200) }, + /* Airlive X.USB a/b/g/n */ + { USB_DEVICE(0x1b75, 0x9170) }, /* terminate */ {} diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 61915f371416..471a52a2f8d4 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv) netif_stop_queue(priv->net_dev); priv->status |= STATUS_RESET_PENDING; if (priv->reset_backoff) - queue_delayed_work(priv->workqueue, &priv->reset_work, - priv->reset_backoff * HZ); + schedule_delayed_work(&priv->reset_work, + priv->reset_backoff * HZ); else - queue_delayed_work(priv->workqueue, &priv->reset_work, - 0); + schedule_delayed_work(&priv->reset_work, 0); if (priv->reset_backoff < MAX_RESET_BACKOFF) priv->reset_backoff++; @@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv) if (priv->stop_hang_check) { priv->stop_hang_check = 0; - queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + schedule_delayed_work(&priv->hang_check, HZ / 2); } fail_up: @@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->stop_rf_kill) { priv->stop_rf_kill = 0; - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); } deferred = 1; @@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) priv->status |= STATUS_ASSOCIATING; priv->connect_start = get_seconds(); - queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10); + schedule_delayed_work(&priv->wx_event_work, HZ / 10); } static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, @@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) return; if (priv->status & STATUS_SECURITY_UPDATED) - queue_delayed_work(priv->workqueue, &priv->security_work, 0); + schedule_delayed_work(&priv->security_work, 0); - queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); + schedule_delayed_work(&priv->wx_event_work, 0); } static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) @@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) /* Make sure the RF Kill check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ)); } static void send_scan_event(void *data) @@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) /* Only userspace-requested scan completion events go out immediately */ if (!priv->user_requested_scan) { if (!delayed_work_pending(&priv->scan_event_later)) - queue_delayed_work(priv->workqueue, - &priv->scan_event_later, - round_jiffies_relative(msecs_to_jiffies(4000))); + schedule_delayed_work(&priv->scan_event_later, + round_jiffies_relative(msecs_to_jiffies(4000))); } else { priv->user_requested_scan = 0; 
cancel_delayed_work(&priv->scan_event_later); - queue_work(priv->workqueue, &priv->scan_event_now); + schedule_work(&priv->scan_event_now); } } @@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) /* Make sure the RF_KILL check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); } else schedule_reset(priv); } @@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv, IPW_DEBUG_INFO("exit\n"); } -static void ipw2100_kill_workqueue(struct ipw2100_priv *priv) +static void ipw2100_kill_works(struct ipw2100_priv *priv) { - if (priv->workqueue) { - priv->stop_rf_kill = 1; - priv->stop_hang_check = 1; - cancel_delayed_work(&priv->reset_work); - cancel_delayed_work(&priv->security_work); - cancel_delayed_work(&priv->wx_event_work); - cancel_delayed_work(&priv->hang_check); - cancel_delayed_work(&priv->rf_kill); - cancel_delayed_work(&priv->scan_event_later); - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; - } + priv->stop_rf_kill = 1; + priv->stop_hang_check = 1; + cancel_delayed_work_sync(&priv->reset_work); + cancel_delayed_work_sync(&priv->security_work); + cancel_delayed_work_sync(&priv->wx_event_work); + cancel_delayed_work_sync(&priv->hang_check); + cancel_delayed_work_sync(&priv->rf_kill); + cancel_work_sync(&priv->scan_event_now); + cancel_delayed_work_sync(&priv->scan_event_later); } static int ipw2100_tx_allocate(struct ipw2100_priv *priv) @@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work) priv->last_rtc = rtc; if (!priv->stop_hang_check) - queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + schedule_delayed_work(&priv->hang_check, HZ / 2); spin_unlock_irqrestore(&priv->low_lock, flags); } @@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work) if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); if (!priv->stop_rf_kill) - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(HZ)); goto exit_unlock; } @@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, INIT_LIST_HEAD(&priv->fw_pend_list); INIT_STAT(&priv->fw_pend_stat); - priv->workqueue = create_workqueue(DRV_NAME); - INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); @@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, if (dev->irq) free_irq(dev->irq, priv); - ipw2100_kill_workqueue(priv); + ipw2100_kill_works(priv); /* These are safe to call even if they weren't allocated */ ipw2100_queues_free(priv); @@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) * first, then close() will crash. */ unregister_netdev(dev); - /* ipw2100_down will ensure that there is no more pending work - * in the workqueue's, so we can safely remove them now. 
*/ - ipw2100_kill_workqueue(priv); + ipw2100_kill_works(priv); ipw2100_queues_free(priv); diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index 838002b4881e..99cba968aa58 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -580,7 +580,6 @@ struct ipw2100_priv { struct tasklet_struct irq_tasklet; - struct workqueue_struct *workqueue; struct delayed_work reset_work; struct delayed_work security_work; struct delayed_work wx_event_work; diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index ae438ed80c2f..160881f234cc 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv) /* If we aren't associated, schedule turning the LED off */ if (!(priv->status & STATUS_ASSOCIATED)) - queue_delayed_work(priv->workqueue, - &priv->led_link_off, - LD_TIME_LINK_ON); + schedule_delayed_work(&priv->led_link_off, + LD_TIME_LINK_ON); } spin_unlock_irqrestore(&priv->lock, flags); @@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv) * turning the LED on (blink while unassociated) */ if (!(priv->status & STATUS_RF_KILL_MASK) && !(priv->status & STATUS_ASSOCIATED)) - queue_delayed_work(priv->workqueue, &priv->led_link_on, - LD_TIME_LINK_OFF); + schedule_delayed_work(&priv->led_link_on, + LD_TIME_LINK_OFF); } @@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv) priv->status |= STATUS_LED_ACT_ON; cancel_delayed_work(&priv->led_act_off); - queue_delayed_work(priv->workqueue, &priv->led_act_off, - LD_TIME_ACT_ON); + schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); } else { /* Reschedule LED off for full time period */ cancel_delayed_work(&priv->led_act_off); - queue_delayed_work(priv->workqueue, &priv->led_act_off, - LD_TIME_ACT_ON); + schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); } } @@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) if (disable_radio) { priv->status |= STATUS_RF_KILL_SW; - if (priv->workqueue) { - cancel_delayed_work(&priv->request_scan); - cancel_delayed_work(&priv->request_direct_scan); - cancel_delayed_work(&priv->request_passive_scan); - cancel_delayed_work(&priv->scan_event); - } - queue_work(priv->workqueue, &priv->down); + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + schedule_work(&priv->down); } else { priv->status &= ~STATUS_RF_KILL_SW; if (rf_kill_active(priv)) { @@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) "disabled by HW switch\n"); /* Make sure the RF_KILL check timer is running */ cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - round_jiffies_relative(2 * HZ)); + schedule_delayed_work(&priv->rf_kill, + round_jiffies_relative(2 * HZ)); } else - queue_work(priv->workqueue, &priv->up); + schedule_work(&priv->up); } return 1; @@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) cancel_delayed_work(&priv->request_passive_scan); cancel_delayed_work(&priv->scan_event); schedule_work(&priv->link_down); - queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); handled |= IPW_INTA_BIT_RF_KILL_DONE; } @@ -2103,7 +2098,7 @@ static void 
ipw_irq_tasklet(struct ipw_priv *priv) priv->status &= ~STATUS_HCMD_ACTIVE; wake_up_interruptible(&priv->wait_command_queue); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); handled |= IPW_INTA_BIT_FATAL_ERROR; } @@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); } -/* - * NOTE: This must be executed from our workqueue as it results in udelay - * being called which may corrupt the keyboard if executed on default - * workqueue - */ static void ipw_adapter_restart(void *adapter) { struct ipw_priv *priv = adapter; @@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data) IPW_DEBUG_SCAN("Scan completion watchdog resetting " "adapter after (%dms).\n", jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } else if (priv->status & STATUS_SCANNING) { IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " "after (%dms).\n", jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); ipw_abort_scan(priv); - queue_delayed_work(priv->workqueue, &priv->scan_check, HZ); + schedule_delayed_work(&priv->scan_check, HZ); } } @@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) if (priv->status & STATUS_ASSOCIATING) { IPW_DEBUG_ASSOC("Disassociating while associating.\n"); - queue_work(priv->workqueue, &priv->disassociate); + schedule_work(&priv->disassociate); return; } @@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv) priv->quality = quality; - queue_delayed_work(priv->workqueue, &priv->gather_stats, - IPW_STATS_INTERVAL); + schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL); } static void ipw_bg_gather_stats(struct work_struct *work) @@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, "Aborting scan with missed beacon.\n"); - queue_work(priv->workqueue, &priv->abort_scan); + schedule_work(&priv->abort_scan); } - queue_work(priv->workqueue, &priv->disassociate); + schedule_work(&priv->disassociate); return; } @@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, if (!(priv->status & STATUS_ROAMING)) { priv->status |= STATUS_ROAMING; if (!(priv->status & STATUS_SCANNING)) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } return; } @@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, * channels..) 
*/ IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, "Aborting scan with missed beacon.\n"); - queue_work(priv->workqueue, &priv->abort_scan); + schedule_work(&priv->abort_scan); } IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); @@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv) /* Only userspace-requested scan completion events go out immediately */ if (!priv->user_requested_scan) { if (!delayed_work_pending(&priv->scan_event)) - queue_delayed_work(priv->workqueue, &priv->scan_event, - round_jiffies_relative(msecs_to_jiffies(4000))); + schedule_delayed_work(&priv->scan_event, + round_jiffies_relative(msecs_to_jiffies(4000))); } else { union iwreq_data wrqu; @@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG_ASSOC ("queueing adhoc check\n"); - queue_delayed_work(priv-> - workqueue, - &priv-> - adhoc_check, - le16_to_cpu(priv-> - assoc_request. - beacon_interval)); + schedule_delayed_work( + &priv->adhoc_check, + le16_to_cpu(priv-> + assoc_request. + beacon_interval)); break; } priv->status &= ~STATUS_ASSOCIATING; priv->status |= STATUS_ASSOCIATED; - queue_work(priv->workqueue, - &priv->system_config); + schedule_work(&priv->system_config); #ifdef CONFIG_IPW2200_QOS #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ @@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv, #ifdef CONFIG_IPW2200_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) { priv->status |= STATUS_SCAN_FORCED; - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); break; } priv->status &= ~STATUS_SCAN_FORCED; #endif /* CONFIG_IPW2200_MONITOR */ /* Do queued direct scans first */ - if (priv->status & STATUS_DIRECT_SCAN_PENDING) { - queue_delayed_work(priv->workqueue, - &priv->request_direct_scan, 0); - } + if (priv->status & STATUS_DIRECT_SCAN_PENDING) + schedule_delayed_work(&priv->request_direct_scan, 0); if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING | STATUS_ROAMING | STATUS_DISASSOCIATING))) - queue_work(priv->workqueue, &priv->associate); + schedule_work(&priv->associate); else if (priv->status & STATUS_ROAMING) { if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) /* If a scan completed and we are in roam mode, then * the scan that completed was the one requested as a * result of entering roam... so, schedule the * roam work */ - queue_work(priv->workqueue, - &priv->roam); + schedule_work(&priv->roam); else /* Don't schedule if we aborted the scan */ priv->status &= ~STATUS_ROAMING; } else if (priv->status & STATUS_SCAN_PENDING) - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); else if (priv->config & CFG_BACKGROUND_SCAN && priv->status & STATUS_ASSOCIATED) - queue_delayed_work(priv->workqueue, - &priv->request_scan, - round_jiffies_relative(HZ)); + schedule_delayed_work(&priv->request_scan, + round_jiffies_relative(HZ)); /* Send an empty event to user space. 
* We don't send the received data on the event because @@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv) /* If the pre-allocated buffer pool is dropping low, schedule to * refill it */ if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(priv->workqueue, &priv->rx_replenish); + schedule_work(&priv->rx_replenish); /* If we've added more space for the firmware to place data, tell it */ if (write != rxq->write) @@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data) return; } - queue_delayed_work(priv->workqueue, &priv->adhoc_check, - le16_to_cpu(priv->assoc_request.beacon_interval)); + schedule_delayed_work(&priv->adhoc_check, + le16_to_cpu(priv->assoc_request.beacon_interval)); } static void ipw_bg_adhoc_check(struct work_struct *work) @@ -6523,8 +6502,7 @@ send_request: } else priv->status &= ~STATUS_SCAN_PENDING; - queue_delayed_work(priv->workqueue, &priv->scan_check, - IPW_SCAN_CHECK_WATCHDOG); + schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG); done: mutex_unlock(&priv->mutex); return err; @@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv, !memcmp(network->ssid, priv->assoc_network->ssid, network->ssid_len)) { - queue_work(priv->workqueue, - &priv->merge_networks); + schedule_work(&priv->merge_networks); } } @@ -7663,7 +7640,7 @@ static int ipw_associate(void *data) if (priv->status & STATUS_DISASSOCIATING) { IPW_DEBUG_ASSOC("Not attempting association (in " "disassociating)\n "); - queue_work(priv->workqueue, &priv->associate); + schedule_work(&priv->associate); return 0; } @@ -7731,12 +7708,10 @@ static int ipw_associate(void *data) if (!(priv->status & STATUS_SCANNING)) { if (!(priv->config & CFG_SPEED_SCAN)) - queue_delayed_work(priv->workqueue, - &priv->request_scan, - SCAN_INTERVAL); + schedule_delayed_work(&priv->request_scan, + SCAN_INTERVAL); else - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } return 0; @@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev, priv->ieee->iw_mode = wrqu->mode; - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); mutex_unlock(&priv->mutex); return err; } @@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev, IPW_DEBUG_WX("Start scan\n"); - queue_delayed_work(priv->workqueue, work, 0); + schedule_delayed_work(work, 0); return 0; } @@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, #else priv->net_dev->type = ARPHRD_IEEE80211; #endif - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } ipw_set_channel(priv, parms[1]); @@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, return 0; } priv->net_dev->type = ARPHRD_ETHER; - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } mutex_unlock(&priv->mutex); return 0; @@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev, { struct ipw_priv *priv = libipw_priv(dev); IPW_DEBUG_WX("RESET\n"); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); return 0; } @@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); printk(KERN_INFO "%s: Setting MAC to %pM\n", priv->net_dev->name, priv->mac_addr); - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); 
mutex_unlock(&priv->mutex); return 0; } @@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter) if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); - if (priv->workqueue) - queue_delayed_work(priv->workqueue, - &priv->rf_kill, 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); goto exit_unlock; } @@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter) "device\n"); /* we can not do an adapter restart while inside an irq lock */ - queue_work(priv->workqueue, &priv->adapter_restart); + schedule_work(&priv->adapter_restart); } else IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " "enabled\n"); @@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv) notify_wx_assoc_event(priv); if (priv->config & CFG_BACKGROUND_SCAN) - queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); + schedule_delayed_work(&priv->request_scan, HZ); } static void ipw_bg_link_up(struct work_struct *work) @@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv) if (!(priv->status & STATUS_EXIT_PENDING)) { /* Queue up another scan... */ - queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); } else cancel_delayed_work(&priv->scan_event); } @@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) { int ret = 0; - priv->workqueue = create_workqueue(DRV_NAME); init_waitqueue_head(&priv->wait_command_queue); init_waitqueue_head(&priv->wait_state); @@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv) IPW_WARNING("Radio Frequency Kill Switch is On:\n" "Kill switch must be turned off for " "wireless networking to work.\n"); - queue_delayed_work(priv->workqueue, &priv->rf_kill, - 2 * HZ); + schedule_delayed_work(&priv->rf_kill, 2 * HZ); return 0; } @@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv) /* If configure to try and auto-associate, kick * off a scan. */ - queue_delayed_work(priv->workqueue, - &priv->request_scan, 0); + schedule_delayed_work(&priv->request_scan, 0); return 0; } @@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); if (err) { IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); - goto out_destroy_workqueue; + goto out_iounmap; } SET_NETDEV_DEV(net_dev, &pdev->dev); @@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); out_release_irq: free_irq(pdev->irq, priv); - out_destroy_workqueue: - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; out_iounmap: iounmap(priv->hw_base); out_pci_release_regions: @@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) kfree(priv->cmdlog); priv->cmdlog = NULL; } - /* ipw_down will ensure that there is no more pending work - * in the workqueue's, so we can safely remove them now. 
*/ - cancel_delayed_work(&priv->adhoc_check); - cancel_delayed_work(&priv->gather_stats); - cancel_delayed_work(&priv->request_scan); - cancel_delayed_work(&priv->request_direct_scan); - cancel_delayed_work(&priv->request_passive_scan); - cancel_delayed_work(&priv->scan_event); - cancel_delayed_work(&priv->rf_kill); - cancel_delayed_work(&priv->scan_check); - destroy_workqueue(priv->workqueue); - priv->workqueue = NULL; + + /* make sure all works are inactive */ + cancel_delayed_work_sync(&priv->adhoc_check); + cancel_work_sync(&priv->associate); + cancel_work_sync(&priv->disassociate); + cancel_work_sync(&priv->system_config); + cancel_work_sync(&priv->rx_replenish); + cancel_work_sync(&priv->adapter_restart); + cancel_delayed_work_sync(&priv->rf_kill); + cancel_work_sync(&priv->up); + cancel_work_sync(&priv->down); + cancel_delayed_work_sync(&priv->request_scan); + cancel_delayed_work_sync(&priv->request_direct_scan); + cancel_delayed_work_sync(&priv->request_passive_scan); + cancel_delayed_work_sync(&priv->scan_event); + cancel_delayed_work_sync(&priv->gather_stats); + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->roam); + cancel_delayed_work_sync(&priv->scan_check); + cancel_work_sync(&priv->link_up); + cancel_work_sync(&priv->link_down); + cancel_delayed_work_sync(&priv->led_link_on); + cancel_delayed_work_sync(&priv->led_link_off); + cancel_delayed_work_sync(&priv->led_act_off); + cancel_work_sync(&priv->merge_networks); /* Free MAC hash list for ADHOC */ for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { @@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) priv->suspend_time = get_seconds() - priv->suspend_at; /* Bring the device back up */ - queue_work(priv->workqueue, &priv->up); + schedule_work(&priv->up); return 0; } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index d7d049c7a4fa..0441445b8bfa 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h @@ -1299,8 +1299,6 @@ struct ipw_priv { u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; u8 direct_scan_ssid_len; - struct workqueue_struct *workqueue; - struct delayed_work adhoc_check; struct work_struct associate; struct work_struct disassociate; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, } #endif -/** - * iwl3945_good_plcp_health - checks for plcp error. - * - * When the plcp error is exceeding the thresholds, reset the radio - * to improve the throughput. - */ -static bool iwl3945_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - bool rc = true; - struct iwl3945_notif_statistics current_stat; - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; - - if (priv->cfg->base_params->plcp_delta_threshold == - IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { - IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); - return rc; - } - memcpy(¤t_stat, pkt->u.raw, sizeof(struct - iwl3945_notif_statistics)); - /* - * check for plcp_err and trigger radio reset if it exceeds - * the plcp error threshold plcp_delta. 
- */ - plcp_received_jiffies = jiffies; - plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - - (long) priv->plcp_jiffies); - priv->plcp_jiffies = plcp_received_jiffies; - /* - * check to make sure plcp_msec is not 0 to prevent division - * by zero. - */ - if (plcp_msec) { - combined_plcp_delta = - (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - - le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); - - if ((combined_plcp_delta > 0) && - ((combined_plcp_delta * 100) / plcp_msec) > - priv->cfg->base_params->plcp_delta_threshold) { - /* - * if plcp_err exceed the threshold, the following - * data is printed in csv format: - * Text: plcp_err exceeded %d, - * Received ofdm.plcp_err, - * Current ofdm.plcp_err, - * combined_plcp_delta, - * plcp_msec - */ - IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " - "%u, %d, %u mSecs\n", - priv->cfg->base_params->plcp_delta_threshold, - le32_to_cpu(current_stat.rx.ofdm.plcp_err), - combined_plcp_delta, plcp_msec); - /* - * Reset the RF radio due to the high plcp - * error rate - */ - rc = false; - } - } - return rc; -} - void iwl3945_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { @@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { .isr_ops = { .isr = iwl_isr_legacy, }, - .check_plcp_health = iwl3945_good_plcp_health, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -51,7 +51,7 @@ #include "iwl-agn-debugfs.h" /* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 2 +#define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 /* Lowest firmware API version supported */ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af505bcd7ae0..ef36aff1bb43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ .ops = &iwl6050_ops, \ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 36335b1b54d4..c1cfd9952e52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { @@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); } /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 
1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, while (i != idx) { u16 len; struct sk_buff *skb; + dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } + dma_addr = le32_to_cpu(desc->host_addr); + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, - le32_to_cpu(desc->host_addr), - priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; - desc->host_addr = 0; + desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); + pci_dma_sync_single_for_device(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, __le32 mode; int ret; + if (priv->device_type != RNDIS_BCM4320B) + return -ENOTSUPP; + netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, enabled ? "enabled" : "disabled", timeout); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. 
+ */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) @@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, #endif #ifdef CONFIG_RT2800PCI_RT35XX + { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. + */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index b8433f3a9bc2..62876cd5c41a 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) } static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, - u8 efuse_data, u8 offset, int *bcontinual, - u8 *write_state, struct pgpkt_struct target_pkt, - int *repeat_times, int *bresult, u8 word_en) + u8 efuse_data, u8 offset, int *bcontinual, + u8 *write_state, struct pgpkt_struct *target_pkt, + int *repeat_times, int *bresult, u8 word_en) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct pgpkt_struct tmp_pkt; @@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, tmp_pkt.word_en = tmp_header & 0x0F; tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); - if (tmp_pkt.offset != target_pkt.offset) { - efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; + if (tmp_pkt.offset != target_pkt->offset) { + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; *write_state = PG_STATE_HEADER; } else { for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { @@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } if (bdataempty == false) { - efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; *write_state = PG_STATE_HEADER; } else { match_word_en = 0x0F; - if (!((target_pkt.word_en & BIT(0)) | + if (!((target_pkt->word_en & BIT(0)) | (tmp_pkt.word_en & BIT(0)))) match_word_en &= (~BIT(0)); - if (!((target_pkt.word_en & BIT(1)) | + if (!((target_pkt->word_en & BIT(1)) | (tmp_pkt.word_en & BIT(1)))) match_word_en &= (~BIT(1)); - if (!((target_pkt.word_en & BIT(2)) | + if (!((target_pkt->word_en & BIT(2)) | (tmp_pkt.word_en & BIT(2)))) match_word_en &= (~BIT(2)); - if (!((target_pkt.word_en & BIT(3)) | + if (!((target_pkt->word_en & BIT(3)) | (tmp_pkt.word_en & BIT(3)))) match_word_en &= (~BIT(3)); @@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 
*efuse_addr, badworden = efuse_word_enable_data_write( hw, *efuse_addr + 1, tmp_pkt.word_en, - target_pkt.data); + target_pkt->data); if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = offset; @@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } tmp_word_en = 0x0F; - if ((target_pkt.word_en & BIT(0)) ^ + if ((target_pkt->word_en & BIT(0)) ^ (match_word_en & BIT(0))) tmp_word_en &= (~BIT(0)); - if ((target_pkt.word_en & BIT(1)) ^ + if ((target_pkt->word_en & BIT(1)) ^ (match_word_en & BIT(1))) tmp_word_en &= (~BIT(1)); - if ((target_pkt.word_en & BIT(2)) ^ + if ((target_pkt->word_en & BIT(2)) ^ (match_word_en & BIT(2))) tmp_word_en &= (~BIT(2)); - if ((target_pkt.word_en & BIT(3)) ^ + if ((target_pkt->word_en & BIT(3)) ^ (match_word_en & BIT(3))) tmp_word_en &= (~BIT(3)); if ((tmp_word_en & 0x0F) != 0x0F) { *efuse_addr = efuse_get_current_size(hw); - target_pkt.offset = offset; - target_pkt.word_en = tmp_word_en; + target_pkt->offset = offset; + target_pkt->word_en = tmp_word_en; } else *bcontinual = false; *write_state = PG_STATE_HEADER; @@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } } else { *efuse_addr += (2 * tmp_word_cnts) + 1; - target_pkt.offset = offset; - target_pkt.word_en = word_en; + target_pkt->offset = offset; + target_pkt->word_en = word_en; *write_state = PG_STATE_HEADER; } } @@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, efuse_write_data_case1(hw, &efuse_addr, efuse_data, offset, &bcontinual, - &write_state, target_pkt, + &write_state, &target_pkt, &repeat_times, &bresult, word_en); else diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 012e1a4016fe..40372bac9482 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c @@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) { beacon = ieee80211_beacon_get(hw, vif); + if (!beacon) + goto out_sleep; + ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, beacon->len); diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 46714910f98c..7145ea543783 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c @@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl) spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); - kfree(cmd); - wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); + kfree(cmd); } static void wl1271_spi_init(struct wl1271 *wl) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 546de5749824..da1f12120346 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -120,6 +120,9 @@ struct netfront_info { unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; + + /* Statistics */ + int rx_gso_checksum_fixup; }; struct netfront_rx_info { @@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np, return cons; } -static int skb_checksum_setup(struct sk_buff *skb) +static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; + int recalculate_partial_csum = 0; + + /* + * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy + * peers can fail to set NETRXF_csum_blank when sending a GSO + * frame. 
In this case force the SKB to CHECKSUM_PARTIAL and + * recalculate the partial checksum. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { + struct netfront_info *np = netdev_priv(dev); + np->rx_gso_checksum_fixup++; + skb->ip_summed = CHECKSUM_PARTIAL; + recalculate_partial_csum = 1; + } + + /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; if (skb->protocol != htons(ETH_P_IP)) goto out; @@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb) switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); + + if (recalculate_partial_csum) { + struct tcphdr *tcph = (struct tcphdr *)th; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_TCP, 0); + } break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); + + if (recalculate_partial_csum) { + struct udphdr *udph = (struct udphdr *)th; + udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_UDP, 0); + } break; default: if (net_ratelimit()) @@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev, /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb_checksum_setup(skb)) { - kfree_skb(skb); - packets_dropped++; - dev->stats.rx_errors++; - continue; - } + if (checksum_setup(dev, skb)) { + kfree_skb(skb); + packets_dropped++; + dev->stats.rx_errors++; + continue; } dev->stats.rx_packets++; @@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev, } } +static const struct xennet_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} xennet_stats[] = { + { + "rx_gso_checksum_fixup", + offsetof(struct netfront_info, rx_gso_checksum_fixup) + }, +}; + +static int xennet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return ARRAY_SIZE(xennet_stats); + default: + return -EINVAL; + } +} + +static void xennet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + void *np = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) + data[i] = *(int *)(np + xennet_stats[i].offset); +} + +static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + xennet_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + static const struct ethtool_ops xennet_ethtool_ops = { .set_tx_csum = ethtool_op_set_tx_csum, .set_sg = xennet_set_sg, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, + + .get_sset_count = xennet_get_sset_count, + .get_ethtool_stats = xennet_get_ethtool_stats, + .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS |
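Note on the xen-netfront checksum_setup() change above: when a GSO frame arrives without NETRXF_csum_blank, the fix forces the skb to CHECKSUM_PARTIAL and seeds the L4 checksum field with ~csum_tcpudp_magic(), i.e. the folded ones-complement sum of the IPv4 pseudo-header, so the checksum can later be completed over the TCP/UDP header and payload. The stand-alone C sketch below only illustrates that pseudo-header sum; pseudo_hdr_sum() and csum_fold() are made-up helper names for the example, not kernel APIs, and the addresses/lengths are arbitrary.

/* Illustration only: user-space model of the IPv4 pseudo-header sum
 * (RFC 793 / RFC 768) that csum_tcpudp_magic() computes in the kernel. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Fold a 32-bit ones-complement accumulator down to 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Ones-complement sum of the TCP/UDP pseudo-header:
 * src addr, dst addr, protocol, L4 length. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t l4_len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += l4_len;
	return csum_fold(sum);
}

int main(void)
{
	uint32_t saddr = ntohl(inet_addr("192.0.2.1"));	/* example addresses */
	uint32_t daddr = ntohl(inet_addr("192.0.2.2"));
	uint16_t tcp_len = 20 + 1460;	/* TCP header + payload bytes */

	/* This folded sum is what ends up seeded in tcph->check / udph->check
	 * before the remaining checksum is computed over header + payload. */
	printf("pseudo-header sum: 0x%04x\n",
	       pseudo_hdr_sum(saddr, daddr, tcp_len, 6 /* IPPROTO_TCP */));
	return 0;
}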