Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 6
-rw-r--r--  drivers/net/8139cp.c | 10
-rw-r--r--  drivers/net/Kconfig | 18
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 2
-rw-r--r--  drivers/net/atlx/atl1.c | 1
-rw-r--r--  drivers/net/au1000_eth.c | 10
-rw-r--r--  drivers/net/b44.c | 11
-rw-r--r--  drivers/net/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/benet/be_main.c | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 44
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 57
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 21
-rw-r--r--  drivers/net/bonding/bonding.h | 12
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c | 2
-rw-r--r--  drivers/net/caif/caif_shmcore.c | 2
-rw-r--r--  drivers/net/caif/caif_spi.c | 61
-rw-r--r--  drivers/net/caif/caif_spi_slave.c | 13
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/pch_can.c | 10
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 1
-rw-r--r--  drivers/net/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 131
-rw-r--r--  drivers/net/cxgb4vf/sge.c | 122
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h | 1
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 113
-rw-r--r--  drivers/net/e1000/e1000_main.c | 12
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c | 9
-rw-r--r--  drivers/net/ehea/ehea_main.c | 25
-rw-r--r--  drivers/net/enic/enic_main.c | 3
-rw-r--r--  drivers/net/gianfar.c | 7
-rw-r--r--  drivers/net/gianfar_ethtool.c | 5
-rw-r--r--  drivers/net/ibm_newemac/core.c | 1
-rw-r--r--  drivers/net/ifb.c | 2
-rw-r--r--  drivers/net/ipg.c | 6
-rw-r--r--  drivers/net/irda/sh_sir.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 63
-rw-r--r--  drivers/net/jme.c | 4
-rw-r--r--  drivers/net/mlx4/fw.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 6
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_param.c | 8
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 30
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/phy/icplus.c | 59
-rw-r--r--  drivers/net/phy/marvell.c | 182
-rw-r--r--  drivers/net/ppp_generic.c | 43
-rw-r--r--  drivers/net/pppoe.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 1
-rw-r--r--  drivers/net/qlge/qlge.h | 1
-rw-r--r--  drivers/net/qlge/qlge_main.c | 7
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 12
-rw-r--r--  drivers/net/r8169.c | 38
-rw-r--r--  drivers/net/sfc/efx.c | 43
-rw-r--r--  drivers/net/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/sfc/nic.c | 6
-rw-r--r--  drivers/net/skge.c | 1
-rw-r--r--  drivers/net/smsc911x.h | 2
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/tile/Makefile | 10
-rw-r--r--  drivers/net/tile/tilepro.c | 2406
-rw-r--r--  drivers/net/tulip/de2104x.c | 1
-rw-r--r--  drivers/net/tulip/dmfe.c | 6
-rw-r--r--  drivers/net/ucc_geth.c | 25
-rw-r--r--  drivers/net/ucc_geth.h | 3
-rw-r--r--  drivers/net/usb/hso.c | 14
-rw-r--r--  drivers/net/usb/usbnet.c | 11
-rw-r--r--  drivers/net/virtio_net.c | 12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 8
-rw-r--r--  drivers/net/wan/hd64572.c | 5
-rw-r--r--  drivers/net/wan/x25_asy.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 73
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 27
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 47
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 51
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 22
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 8
-rw-r--r--  drivers/net/wireless/b43/sdio.c | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_module.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 3
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 5
-rw-r--r--  drivers/net/wireless/libertas/dev.h | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 1
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 9
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 18
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_cs.c | 14
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 1
-rw-r--r--  drivers/net/wireless/orinoco/scan.c | 8
-rw-r--r--  drivers/net/wireless/orinoco/scan.h | 1
-rw-r--r--  drivers/net/wireless/orinoco/spectrum_cs.c | 14
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 3
-rw-r--r--  drivers/net/xen-netfront.c | 4
121 files changed, 3624 insertions(+), 641 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_PCI(dev) NULL
#endif
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_EISA(dev) NULL
#endif
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..4f1755bddf6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2543,10 +2543,10 @@ config PCH_GBE
depends on PCI
select MII
---help---
- This is a gigabit ethernet driver for Topcliff PCH.
- Topcliff PCH is the platform controller hub that is used in Intel's
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
general embedded platform.
- Topcliff PCH has Gigabit Ethernet interface.
+ EG20T PCH has Gigabit Ethernet interface.
Using this interface, it is able to access system devices connected
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.
@@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig"
source "drivers/net/caif/Kconfig"
+config TILE_NET
+ tristate "Tilera GBE/XGBE network driver support"
+ depends on TILE
+ default y
+ select CRC32
+ help
+ This is a standard Linux network device driver for the
+ on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tile_net.
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 652fc6b98039..b90738d13994 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_PCH_GBE) += pch_gbe/
+obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
/* Enable OTP CLK */
if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3b24ac..53363108994e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..53eff9ba6e95 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
if (force_reset || (!aup->mac_enabled)) {
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE), &aup->enable);
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* set a random MAC now in case platform_data doesn't provide one */
random_ether_addr(dev->dev_addr);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e86315b3f8..2e2b76258ab4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
__b44_set_flow_ctrl(bp, pause_enab);
}
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
- const char *str;
+ char buf[20];
u32 val;
int err;
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- str = nvram_get("boardnum");
- if (!str)
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
- if (simple_strtoul(str, NULL, 0) == 2) {
+ if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..e4465d222a7d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1235,7 +1235,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2ffbadc..93354eee2cfd 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -2458,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
int status, i = 0, num_imgs = 0;
const u8 *p;
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -EPERM;
+ }
+
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225decaf..d255428122fc 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.60.01-0"
+#define DRV_MODULE_RELDATE "2010/11/12"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7bc..0af361e4e3d1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
rc = XMIT_PLAIN;
else {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
rc = XMIT_CSUM_V6;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
@@ -1782,15 +1782,15 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
@@ -1835,15 +1835,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @return header len
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+ *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
- skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+ *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
@@ -1912,6 +1912,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@@ -2033,8 +2034,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
- hlen = bnx2x_set_pbd_csum_e2(bp,
- skb, pbd_e2, xmit_type);
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2078,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
if (CHIP_IS_E2(bp))
- bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..4cfd4e9b5586 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e46b61..66df29fcf751 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
/****************************************************************************
* SRC initializations
****************************************************************************/
-
+#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-
+#endif
#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774df843..580919619252 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 4a6f0eac8b12..be52edcf8346 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -9064,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
default:
pr_err("Unknown board_type (%ld), aborting\n",
ent->driver_data);
- return ENODEV;
+ return -ENODEV;
}
cid_count += CNIC_CONTEXT_USE;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..d0ea760ce419 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -171,7 +171,7 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-cpumask_var_t netpoll_block_tx;
+atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
static const char * const version =
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
+ read_lock(&in_dev->mc_list_lock);
for (im = in_dev->mc_list; im; im = im->next)
ip_mc_rejoin_group(im);
+ read_unlock(&in_dev->mc_list_lock);
}
rcu_read_unlock();
@@ -1574,7 +1576,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->slave_cnt == 0)
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
slave_dev->addr_len);
@@ -5297,13 +5299,6 @@ static int __init bonding_init(void)
if (res)
goto out;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
- res = -ENOMEM;
- goto out;
- }
-#endif
-
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
@@ -5332,9 +5327,6 @@ err:
rtnl_link_unregister(&bond_link_ops);
err_link:
unregister_pernet_subsys(&bond_net_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
-#endif
goto out;
}
@@ -5351,7 +5343,10 @@ static void __exit bonding_exit(void)
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
+ /*
+ * Make sure we don't have an imbalance on our netpoll blocking
+ */
+ WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..c2f081352a03 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -119,26 +119,22 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern cpumask_var_t netpoll_block_tx;
+extern atomic_t netpoll_block_tx;
static inline void block_netpoll_tx(void)
{
- preempt_disable();
- BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
- netpoll_block_tx));
+ atomic_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
- netpoll_block_tx));
- preempt_enable();
+ atomic_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
- return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
+ return atomic_read(&netpoll_block_tx);
return 0;
}
#else
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da86f13..32b1c6fb2de1 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,7 +5,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/version.h>
#include <linux/init.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c0656667..80511167f35b 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
ndev = alloc_netdev(sizeof(struct cfspi),
"cfspi%d", cfspi_setup);
- if (!dev)
- return -ENODEV;
+ if (!ndev)
+ return -ENOMEM;
cfspi = netdev_priv(ndev);
netif_stop_queue(ndev);
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
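Note: the PAD_POW2() helper added above computes how many padding bytes are needed to round a length up to the next power-of-two boundary. A minimal standalone sketch of its behavior (plain C with hypothetical values, not part of the patch):

#include <stdio.h>

/* Same helper as added in caif_spi.c: bytes needed to round x up to a
 * multiple of pow (pow must be a non-zero power of two). */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

int main(void)
{
	/* Hypothetical header lengths padded to a 4-byte boundary. */
	printf("%d %d %d\n", PAD_POW2(5, 4), PAD_POW2(8, 4), PAD_POW2(9, 4));
	/* prints: 3 0 3 */
	return 0;
}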
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cee98fa668bd..7ef83d06f7ed 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1,7 +1,7 @@
/*
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
- * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
* (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 55ec324caaf4..672718261c68 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -213,12 +213,12 @@ static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
-static inline void pch_can_bit_set(u32 *addr, u32 mask)
+static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
{
iowrite32(ioread32(addr) | mask, addr);
}
-static inline void pch_can_bit_clear(u32 *addr, u32 mask)
+static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
{
iowrite32(ioread32(addr) & ~mask, addr);
}
@@ -1437,7 +1437,7 @@ probe_exit_endev:
return rc;
}
-static struct pci_driver pch_can_pcidev = {
+static struct pci_driver pch_can_pci_driver = {
.name = "pch_can",
.id_table = pch_pci_tbl,
.probe = pch_can_probe,
@@ -1448,13 +1448,13 @@ static struct pci_driver pch_can_pcidev = {
static int __init pch_can_pci_init(void)
{
- return pci_register_driver(&pch_can_pcidev);
+ return pci_register_driver(&pch_can_pci_driver);
}
module_init(pch_can_pci_init);
static void __exit pch_can_pci_exit(void)
{
- pci_unregister_driver(&pch_can_pcidev);
+ pci_unregister_driver(&pch_can_pci_driver);
}
module_exit(pch_can_pci_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e272075..046d846c652d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->name = adapter->port[i]->name;
__set_bit(i, &adapter->registered_device_map);
- netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f410b3..f50bc98310f8 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev,
__set_bit(i, &adapter->registered_device_map);
adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
- netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d94aea8..e97521c801ea 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -2408,7 +2408,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
if (index < NEXACT_MAC)
ret++;
else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
}
return ret;
}
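Note: the change above widens the shifted constant to 64 bits because the hash vector being built is a u64, so the computed bit position can exceed 31. A minimal standalone sketch of why the ULL suffix matters (plain C with a hypothetical bit value, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bit = 40;   /* hypothetical hashed index above 31 */
	uint64_t hash = 0;

	/* "hash |= (1 << bit)" would perform a 32-bit shift and drop or
	 * corrupt the intended bit; 1ULL keeps the shift in 64 bits. */
	hash |= (1ULL << bit);
	printf("0x%016llx\n", (unsigned long long)hash);  /* 0x0000010000000000 */
	return 0;
}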
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..6bf464afa90e 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- link_start(dev);
+ err = link_start(dev);
+ if (err)
+ return err;
netif_tx_start_all_queues(dev);
return 0;
}
@@ -814,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
}
/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- for_each_dev_addr(dev, ha) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ for_each_dev_addr(dev, ha)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ netdev_for_each_mc_addr(ha, dev)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
@@ -860,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u64 mhash = 0;
u64 uhash = 0;
bool free = true;
- u16 filt_idx[7];
+ unsigned int offset, naddr;
const u8 *addr[7];
- int ret, naddr = 0;
+ int ret;
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
- naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
+ naddr, addr, NULL, &uhash, sleep);
if (ret < 0)
return ret;
@@ -877,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
+ naddr, addr, NULL, &mhash, sleep);
if (ret < 0)
return ret;
+ free = false;
}
return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1122,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
return 0;
}
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /*
- * XXX For now just use the default hash but we probably want to
- * XXX look at other possibilities ...
- */
- return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Poll all of our receive queues. This is called outside of normal interrupt
@@ -2075,6 +2082,22 @@ static int adap_init0(struct adapter *adapter)
}
/*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self- deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
* into the adapter. We just have to live with them ... Note that
@@ -2246,6 +2269,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
+ size_t iqe_size;
/*
* We should not be called till we know how many Queue Sets we can
@@ -2290,6 +2314,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
s->ethqsets = qidx;
/*
+ * The Ingress Queue Entry Size for our various Response Queues needs
+ * to be big enough to accommodate the largest message we can receive
+ * from the chip/firmware; which is 64 bytes ...
+ */
+ iqe_size = 64;
+
+ /*
* Set up default Queue Set parameters ... Start off with the
* shortest interrupt holdoff timer.
*/
@@ -2297,7 +2328,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
struct sge_eth_txq *txq = &s->ethtxq[qs];
- init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+ init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq->fl.size = 72;
txq->q.size = 1024;
}
@@ -2306,8 +2337,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* The firmware event queue is used for link state changes and
* notifications of TX DMA completions.
*/
- init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
- L1_CACHE_BYTES);
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
/*
* The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2323,7 +2353,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* any time ...
*/
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
- L1_CACHE_BYTES);
+ iqe_size);
}
/*
@@ -2417,7 +2447,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
- .ndo_select_queue = cxgb4vf_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2600,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2653,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->do_ioctl = cxgb4vf_do_ioctl;
netdev->change_mtu = cxgb4vf_change_mtu;
netdev->set_mac_address = cxgb4vf_set_mac_addr;
- netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2844,6 +2871,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
{ 0, }
};
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..ecf0770bf0ff 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
-};
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
/*
* Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
}
/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buff! size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
+/**
* t4vf_pktgl_free - free a packet gather list
* @gl: the gather list
*
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
struct port_info *pi;
- struct skb_shared_info *ssi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- unsigned int len = be16_to_cpu(pkt->len);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
/*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
+ * Convert the Packet Gather List into an skb.
*/
- if (len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, gl->frags[0].size);
- skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
- } else {
- skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, RX_PKT_PULL_LEN);
- skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = (gl->frags[0].page_offset +
- RX_PKT_PULL_LEN);
- ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
- skb->len = len + PKTSHIFT;
- skb->data_len = skb->len - RX_PKT_PULL_LEN;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
}
-
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
netif_receive_skb(skb);
return 0;
-
-nomem:
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
}
/**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
}
len = RSPD_LEN(len);
}
+ gl.tot_len = len;
/*
* Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..19520afe1a12 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}
/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware reseting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
+ unsigned nfilters = 0;
+ unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- struct fw_vi_mac_exact *p;
- size_t len16;
- if (naddr > ARRAY_SIZE(cmd.u.exact))
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
return -EINVAL;
- len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
- u.exact[naddr]), 16);
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+ ? rem
+ : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
- for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx =
- cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
- ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
- if (ret)
- return ret;
-
- for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
- if (idx)
- idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
- ? 0xffff
- : index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
- ret++;
- else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+ sleep_ok);
+ if (ret && ret != -ENOMEM)
+ break;
+
+ for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] =
+ (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ /*
+ * If there were no errors or we merely ran out of room in our MAC
+ * address arena, return the number of filters actually written.
+ */
+ if (ret == 0 || ret == -ENOMEM)
+ ret = nfilters;
return ret;
}
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 1b8a43a29c01..e02c5d17af07 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__E1000_DOWN, &adapter->flags);
/* disable receives in the hardware */
rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * timers and tasks from rescheduling.
+ */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..1f37ee6b2a26 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -261,6 +261,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
}
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+ | ETH_FLAG_TXVLAN
+ | ETH_FLAG_RXVLAN);
+}
+
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +280,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
.get_ethtool_stats = ehea_get_ethtool_stats,
.get_rx_csum = ehea_get_rx_csum,
.set_settings = ehea_set_settings,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ehea_set_flags,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..b95f087cd5a9 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
+ ehea_info("Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
struct net_device *dev = pr->port->netdev;
int i;
- for (i = 0; i < pr->rq1_skba.len; i++) {
+ if (nr_rq1a > pr->rq1_skba.len) {
+ ehea_error("NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i])
+ if (!skb_arr_rq1[i]) {
+			ehea_info("Not enough memory to allocate skb array\n");
break;
+ }
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, nr_rq1a);
+ ehea_update_rq1a(pr->qp, i);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -675,7 +683,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
pr->port->vgrp);
- if (use_lro) {
+ if (skb->dev->features & NETIF_F_LRO) {
if (vlan_extracted)
lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
pr->port->vgrp,
@@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb)
+ if (!skb) {
+ ehea_info("Not enough memory to allocate skb\n");
break;
+ }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
@@ -777,7 +787,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (use_lro)
+ if (dev->features & NETIF_F_LRO)
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
@@ -3268,6 +3278,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
| NETIF_F_LLTX;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
INIT_WORK(&port->reset_task, ehea_reset_port);
ret = register_netdev(dev);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..aa28b270c045 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1962,7 +1962,8 @@ static void enic_poll_controller(struct net_device *netdev)
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- enic_isr_msix_rq(enic->msix_entry[intr].vector, enic);
+ enic_isr_msix_rq(enic->msix_entry[intr].vector,
+ &enic->napi[i]);
}
intr = enic_msix_wq_intr(enic, i);
enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1246a7..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
irq_of_parse_and_map(np, 1);
priv->gfargrp[priv->num_grps].interruptError =
irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
- priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
- priv->gfargrp[priv->num_grps].interruptError < 0) {
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
return -EINVAL;
- }
}
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
spin_unlock_irqrestore(&priv->bflock, flags);
return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..06bb9b799458 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675c5b8b..fe337bd121aa 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev)
rcu_read_unlock();
dev_kfree_skb(skb);
stats->tx_dropped++;
+ if (skb_queue_len(&dp->tq) != 0)
+ goto resched;
break;
}
rcu_read_unlock();
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
- { PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
{ 0, }
};
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
/* Baud Rate Error Correction x 10000 */
u32 rate_err_array[] = {
- 0000, 0625, 1250, 1875,
+ 0, 625, 1250, 1875,
2500, 3125, 3750, 4375,
5000, 5625, 6250, 6875,
7500, 8125, 8750, 9375,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1..eee0b298bd36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* adjust for FCoE Sequence Offload */
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && (skb->protocol == htons(ETH_P_FCOE)) &&
- skb_is_gso(skb)) {
+ && skb_is_gso(skb)
+ && vlan_get_protocol(skb) ==
+ htons(ETH_P_FCOE)) {
hlen = skb_transport_offset(skb) +
sizeof(struct fc_frame_header) +
sizeof(struct fcoe_crc_eof);
@@ -4770,6 +4771,9 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->rx_ring[i] = NULL;
}
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
@@ -5823,7 +5827,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5841,7 +5845,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5880,7 +5884,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +5910,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ __be16 protocol)
{
u32 rtn = 0;
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = ((const struct vlan_ethhdr *)skb->data)->
- h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +5941,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
default:
if (unlikely(net_ratelimit()))
e_warn(probe, "partial checksum but proto=%x!\n",
- skb->protocol);
+ protocol);
break;
}
@@ -5952,7 +5950,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5981,7 +5980,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -6179,7 +6178,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ int queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
@@ -6190,7 +6189,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
u8 l4type = 0;
/* Right now, we support IPv4 only */
- if (skb->protocol != htons(ETH_P_IP))
+ if (protocol != htons(ETH_P_IP))
return;
/* check if we're UDP or TCP */
if (iph->protocol == IPPROTO_TCP) {
@@ -6257,10 +6256,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
-
#ifdef IXGBE_FCOE
- if ((skb->protocol == htons(ETH_P_FCOE)) ||
- (skb->protocol == htons(ETH_P_FIP))) {
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
+
+ if ((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6303,6 +6305,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
int tso;
int count = 0;
unsigned int f;
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6328,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (skb->protocol == htons(ETH_P_FCOE) ||
- skb->protocol == htons(ETH_P_FIP))) {
+ (protocol == htons(ETH_P_FCOE) ||
+ protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6339,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
}
#endif
/* flag for FCoE offloads */
- if (skb->protocol == htons(ETH_P_FCOE))
+ if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
#endif
@@ -6368,9 +6373,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+ protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -6378,7 +6384,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+ protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
@@ -6392,7 +6399,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
test_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3119c2..c57d9a43ceca 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b68eee2414c2..7a7e18ba278a 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
+ mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ field = 3;
+ }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 35ae1aa12896..e1d30d7f2071 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
@@ -1240,7 +1237,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..03a1d280105f 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void)
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
.def = PCH_GBE_DEFAULT_TXD,
- .arg = { .r = { .min = PCH_GBE_MIN_TXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_TXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
};
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
.def = PCH_GBE_DEFAULT_RXD,
- .arg = { .r = { .min = PCH_GBE_MIN_RXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_RXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
};
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
rx_ring->count = RxDescriptors;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..8a4d19e5de06 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- caddr_t base;
- struct timer_list watchdog;
- int stale, fast_poll;
- u_short link_status;
- u_char duplex_flag;
- int phy_id;
- int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
} axnet_dev_t;
static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
if (info->flags & IS_AX88790)
outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+ info->active_low = 0;
+
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
if ((j != 0) && (j != 0xffff)) break;
}
- /* Maybe PHY is in power down mode. (PPD_SET = 1)
- Bit 2 of CCSR is active low. */
if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
if (j == j2) continue;
- if ((j != 0) && (j != 0xffff)) break;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
}
}
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
static int axnet_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
axnet_reset_8390(dev);
AX88190_init(dev, 1);
netif_device_attach(dev);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 03096c80103d..d05c44692f08 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb3d13e4e074..35fda5ac8120 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -64,7 +64,7 @@ config BCM63XX_PHY
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
- Currently supports the IP175C PHY.
+ Currently supports the IP175C and IP1001 PHYs.
config REALTEK_PHY
tristate "Drivers for Realtek PHYs"
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index c1d2d251fe8b..9a09e24c30bc 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,7 +30,7 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_DESCRIPTION("ICPlus IP175C/IP1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
@@ -89,6 +89,33 @@ static int ip175c_config_init(struct phy_device *phydev)
return 0;
}
+static int ip1001_config_init(struct phy_device *phydev)
+{
+ int err, value;
+
+ /* Software Reset PHY */
+ value = phy_read(phydev, MII_BMCR);
+ value |= BMCR_RESET;
+ err = phy_write(phydev, MII_BMCR, value);
+ if (err < 0)
+ return err;
+
+ do {
+ value = phy_read(phydev, MII_BMCR);
+ } while (value & BMCR_RESET);
+
+ /* Additional delay (2ns) used to adjust RX clock phase
+	 * at the GMII/RGMII interface */
+ value = phy_read(phydev, 16);
+ value |= 0x3;
+
+ err = phy_write(phydev, 16, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
static int ip175c_read_status(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
@@ -121,21 +148,43 @@ static struct phy_driver ip175c_driver = {
.driver = { .owner = THIS_MODULE,},
};
-static int __init ip175c_init(void)
+static struct phy_driver ip1001_driver = {
+ .phy_id = 0x02430d90,
+ .name = "ICPlus IP1001",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause,
+ .config_init = &ip1001_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init icplus_init(void)
{
+ int ret = 0;
+
+ ret = phy_driver_register(&ip1001_driver);
+ if (ret < 0)
+ return -ENODEV;
+
return phy_driver_register(&ip175c_driver);
}
-static void __exit ip175c_exit(void)
+static void __exit icplus_exit(void)
{
+ phy_driver_unregister(&ip1001_driver);
phy_driver_unregister(&ip175c_driver);
}
-module_init(ip175c_init);
-module_exit(ip175c_exit);
+module_init(icplus_init);
+module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
+ { 0x02430d90, 0x0ffffff0 },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e2afdce0a437..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#define MII_MARVELL_PHY_PAGE 22
+
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -74,13 +77,12 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
-#define MII_88EC048_PHY_MSCR1_REG 16
-#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6)
+#define MII_88E1318S_PHY_MSCR1_REG 16
+#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
-#define MII_88E1121_PHY_PAGE 22
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
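+ * Example (hypothetical values): write 0x1100 into register 0x14 on
+ * page 2 while preserving the low byte of its current value, i.e.
+ * new = (old & 0x00ff) | 0x1100:
+ *
+ *   marvell,reg-init = <2 0x14 0x00ff 0x1100>;
+ *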
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ int len, i, saved_page, current_page, page_changed, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+ if (!paddr || len < (4 * sizeof(*paddr)))
+ return 0;
+
+ saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ if (saved_page < 0)
+ return saved_page;
+ page_changed = 0;
+ current_page = saved_page;
+
+ ret = 0;
+ len /= sizeof(*paddr);
+ for (i = 0; i < len - 3; i += 4) {
+ u16 reg_page = be32_to_cpup(paddr + i);
+ u16 reg = be32_to_cpup(paddr + i + 1);
+ u16 mask = be32_to_cpup(paddr + i + 2);
+ u16 val_bits = be32_to_cpup(paddr + i + 3);
+ int val;
+
+ if (reg_page != current_page) {
+ current_page = reg_page;
+ page_changed = 1;
+ ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (ret < 0)
+ goto err;
+ }
+
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, reg);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, reg, val);
+ if (ret < 0)
+ goto err;
+
+ }
+err:
+ if (page_changed) {
+ i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ if (ret == 0)
+ ret = i;
+ }
+ return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
static int m88e1121_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -229,36 +312,36 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = genphy_config_aneg(phydev);
return err;
}
-static int m88ec048_config_aneg(struct phy_device *phydev)
+static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
- mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
- mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+ mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
+ mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+ err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
if (err < 0)
return err;
- err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
if (err < 0)
return err;
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0002);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
if (err < 0)
return err;
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0003);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
if (err < 0)
return err;
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
/* Reset address */
- err = phy_write(phydev, 0x16, 0x0);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Change address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ if (err < 0)
+ return err;
+
+ /* Enable 1000 Mbit */
+ err = phy_write(phydev, 0x15, 0x1048);
+ if (err < 0)
+ return err;
+
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
+ /* Reset address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
if (err < 0)
return err;
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -659,12 +783,12 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
- .phy_id = MARVELL_PHY_ID_88EC048,
+ .phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88EC048",
+ .name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &m88ec048_config_aneg,
+ .config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = MARVELL_PHY_ID_88E1149R,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1149R",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &m88e1149_config_init,
+ .config_aneg = &m88e1118_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = { .owner = THIS_MODULE },
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ 0x01410e10, 0xfffffff0 },
{ 0x01410cb0, 0xfffffff0 },
{ 0x01410cd0, 0xfffffff0 },
+ { 0x01410e50, 0xfffffff0 },
{ 0x01410e30, 0xfffffff0 },
{ 0x01410e90, 0xfffffff0 },
{ }
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..39659976a1ac 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
*/
dev_net_set(dev, net);
- ret = -EEXIST;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
unit = unit_get(&pn->units_idr, ppp);
if (unit < 0) {
- *retp = unit;
+ ret = unit;
goto out2;
}
} else {
+ ret = -EEXIST;
if (unit_find(&pn->units_idr, unit))
goto out2; /* unit already exists */
/*
@@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
ppp->closing = 1;
ppp_unlock(ppp);
unregister_netdev(ppp->dev);
+ unit_put(&pn->units_idr, ppp->file.index);
} else
ppp_unlock(ppp);
- unit_put(&pn->units_idr, ppp->file.index);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
{
int unit, err;
@@ -2871,10 +2870,24 @@ again:
}
err = idr_get_new_above(p, ptr, n, &unit);
- if (err == -EAGAIN)
- goto again;
+ if (err < 0) {
+ if (err == -EAGAIN)
+ goto again;
+ return err;
+ }
+
+ return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+ int unit;
- if (unit != n) {
+ unit = __unit_alloc(p, ptr, n);
+ if (unit < 0)
+ return unit;
+ else if (unit != n) {
idr_remove(p, unit);
return -EINVAL;
}
@@ -2885,19 +2898,7 @@ again:
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, 0, &unit);
- if (err == -EAGAIN)
- goto again;
-
- return unit;
+ return __unit_alloc(p, ptr, 0);
}
/* put unit number back to a pool */
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index d72fb0519a2a..78c0e3c9b2b5 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
abort:
kfree_skb(skb);
- return 0;
+ return 1;
}
/************************************************************************
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cdf9ab3..a3dcd04be22f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..9787dff90d3f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2083,6 +2083,7 @@ struct ql_adapter {
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..2555b1d34f34 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-static int debug = 0x00007fff; /* defaults above */
-module_param(debug, int, 0);
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
@@ -4629,6 +4629,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..a2e919bcb3c6 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
int status;
unsigned long count;
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -603,6 +604,7 @@ done:
end:
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
return status;
}
@@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
- rtnl_lock();
status = ql_mb_set_port_cfg(qdev);
- rtnl_unlock();
if (status)
return status;
status = ql_idc_wait(qdev);
@@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
int status;
- rtnl_lock();
status = ql_mb_get_port_cfg(qdev);
- rtnl_unlock();
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
@@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work)
u32 aen;
int timeout;
- rtnl_lock();
aen = mbcp->mbox_out[1] >> 16;
timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
@@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work)
}
break;
}
- rtnl_unlock();
}
void ql_mpi_work(struct work_struct *work)
@@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work)
struct mbox_params *mbcp = &mbc;
int err = 0;
- rtnl_lock();
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- rtnl_unlock();
+ mutex_unlock(&qdev->mpi_mutex);
ql_enable_completion_interrupt(qdev, 0);
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..53b13deade95 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -744,26 +744,36 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
mdio_write(ioaddr, MII_BMCR, val & 0xffff);
}
-static void rtl8169_check_link_status(struct net_device *dev,
+static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ void __iomem *ioaddr,
+ bool pm)
{
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
/* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(&tp->pci_dev->dev);
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -846,10 +856,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
@@ -2931,7 +2941,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4440,8 +4450,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -4588,7 +4597,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
@@ -4600,7 +4610,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
 	/* We need to see the latest version of tp->intr_mask to
* avoid ignoring an MSI interrupt and having to wait for
@@ -4890,11 +4900,7 @@ static int rtl8169_runtime_idle(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
- return 0;
-
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
- return -EBUSY;
+ return tp->TxDescArray ? -EBUSY : 0;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e47976..fb83cdd94643 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -197,7 +197,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
@@ -335,8 +337,10 @@ void efx_process_channel_now(struct efx_channel *channel)
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
if (channel->irq)
synchronize_irq(channel->irq);
@@ -351,6 +355,8 @@ void efx_process_channel_now(struct efx_channel *channel)
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
@@ -426,6 +432,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
*channel = *old_channel;
+ channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
@@ -736,9 +743,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
if (rc)
goto rollback;
+ efx_init_napi(efx);
+
/* Destroy old channels */
- for (i = 0; i < efx->n_channels; i++)
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
+ }
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
@@ -1400,6 +1411,8 @@ static void efx_start_all(struct efx_nic *efx)
efx_start_channel(channel);
}
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1473,10 @@ static void efx_stop_all(struct efx_nic *efx)
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
@@ -1593,7 +1608,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
*
**************************************************************************/
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1602,18 +1617,21 @@ static int efx_init_napi(struct efx_nic *efx)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
- return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
- channel->napi_dev = NULL;
- }
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
}
/**************************************************************************
@@ -2335,9 +2353,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
if (rc)
goto fail1;
- rc = efx_init_napi(efx);
- if (rc)
- goto fail2;
+ efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
@@ -2368,7 +2384,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
- fail2:
efx_remove_all(efx);
fail1:
return rc;
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d73b52..b137c889152b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -621,6 +621,7 @@ struct efx_filter_state;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
@@ -709,6 +710,7 @@ struct efx_nic {
struct pci_dev *pci_dev;
const struct efx_nic_type *type;
int legacy_irq;
+ bool legacy_irq_enabled;
struct workqueue_struct *workqueue;
char workqueue_name[16];
struct work_struct reset_work;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9a4244..67cb0c96838c 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1418,6 +1418,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
u32 queues;
int syserr;
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..220e0398f1d5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e12a879..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..2114837809e7 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1509,6 +1509,8 @@ static int stmmac_probe(struct net_device *dev)
pr_warning("\tno valid MAC address;"
"please, use ifconfig or nwhwconfig!\n");
+ spin_lock_init(&priv->lock);
+
ret = register_netdev(dev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
@@ -1520,8 +1522,6 @@ static int stmmac_probe(struct net_device *dev)
dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
(dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
- spin_lock_init(&priv->lock);
-
return ret;
}
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile
new file mode 100644
index 000000000000..f634f142cab4
--- /dev/null
+++ b/drivers/net/tile/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the TILE on-chip networking support.
+#
+
+obj-$(CONFIG_TILE_NET) += tile_net.o
+ifdef CONFIG_TILEGX
+tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+else
+tile_net-objs := tilepro.o
+endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
new file mode 100644
index 000000000000..0e6bac5ec65b
--- /dev/null
+++ b/drivers/net/tile/tilepro.c
@@ -0,0 +1,2406 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+
+#include <hv/drv_xgbe_intf.h>
+#include <hv/drv_xgbe_impl.h>
+#include <hv/hypervisor.h>
+#include <hv/netio_intf.h>
+
+/* For TSO */
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+
+/* There is no singlethread_cpu, so schedule work on the current cpu. */
+#define singlethread_cpu -1
+
+
+/*
+ * First, "tile_net_init_module()" initializes all four "devices" which
+ * can be used by linux.
+ *
+ * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
+ * the network cpus, then uses "tile_net_open_aux()" to initialize
+ * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
+ * the tiles, provide buffers to LIPP, allow ingress to start, and
+ * turn on hypervisor interrupt handling (and NAPI) on all tiles.
+ *
+ * If registration fails due to the link being down, then "retry_work"
+ * is used to keep calling "tile_net_open_inner()" until it succeeds.
+ *
+ * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
+ * stop egress, drain the LIPP buffers, unregister all the tiles, stop
+ * LIPP/LEPP, and wipe the LEPP queue.
+ *
+ * We start out with the ingress interrupt enabled on each CPU. When
+ * this interrupt fires, we disable it, and call "napi_schedule()".
+ * This will cause "tile_net_poll()" to be called, which will pull
+ * packets from the netio queue, filtering them out, or passing them
+ * to "netif_receive_skb()". If our budget is exhausted, we will
+ * return, knowing we will be called again later. Otherwise, we
+ * reenable the ingress interrupt, and call "napi_complete()".
+ *
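+ * A minimal sketch of that poll routine (illustrative only; the real
+ * handler below is "tile_net_poll()", and the helper names here are
+ * made up for the sketch):
+ *
+ *	static int poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int work = 0;
+ *		while (work < budget && pull_packet_from_netio_queue(napi))
+ *			work++;
+ *		if (work < budget) {
+ *			napi_complete(napi);
+ *			enable_ingress_interrupt();
+ *		}
+ *		return work;
+ *	}
+ *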
+ *
+ * NOTE: The use of "native_driver" ensures that EPP exists, and that
+ * "epp_sendv" is legal, and that "LIPP" is being used.
+ *
+ * NOTE: Failing to free completions for an arbitrarily long time
+ * (which is defined to be illegal) does in fact cause bizarre
+ * problems. The "egress_timer" helps prevent this from happening.
+ *
+ * NOTE: The egress code can be interrupted by the interrupt handler.
+ */
+
+
+/* HACK: Allow use of "jumbo" packets. */
+/* This should be 1500 if "jumbo" is not set in LIPP. */
+/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
+/* ISSUE: This has not been thoroughly tested (except at 1500). */
+#define TILE_NET_MTU 1500
+
+/* HACK: Define to support GSO. */
+/* ISSUE: This may actually hurt performance of the TCP blaster. */
+/* #define TILE_NET_GSO */
+
+/* Define this to collapse "duplicate" acks. */
+/* #define IGNORE_DUP_ACKS */
+
+/* HACK: Define this to verify incoming packets. */
+/* #define TILE_NET_VERIFY_INGRESS */
+
+/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
+#define TILE_NET_TX_QUEUE_LEN 0
+
+/* Define to dump packets (prints out the whole packet on tx and rx). */
+/* #define TILE_NET_DUMP_PACKETS */
+
+/* Define to enable debug spew (all PDEBUG's are enabled). */
+/* #define TILE_NET_DEBUG */
+
+
+/* Define to activate paranoia checks. */
+/* #define TILE_NET_PARANOIA */
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* Default retry interval for bringing up the NetIO interface, in jiffies. */
+#define TILE_NET_RETRY_INTERVAL (5 * HZ)
+
+/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
+#define TILE_NET_DEVS 4
+
+
+
+/* Paranoia. */
+#if NET_IP_ALIGN != LIPP_PACKET_PADDING
+#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
+#endif
+
+
+/* Debug print. */
+#ifdef TILE_NET_DEBUG
+#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
+#else
+#define PDEBUG(fmt, args...)
+#endif
+
+
+MODULE_AUTHOR("Tilera");
+MODULE_LICENSE("GPL");
+
+
+#define IS_MULTICAST(mac_addr) \
+ (((u8 *)(mac_addr))[0] & 0x01)
+
+#define IS_BROADCAST(mac_addr) \
+ (((u16 *)(mac_addr))[0] == 0xffff)
+
+
+/*
+ * Queue of incoming packets for a specific cpu and device.
+ *
+ * Includes a pointer to the "system" data, and the actual "user" data.
+ */
+struct tile_netio_queue {
+ netio_queue_impl_t *__system_part;
+ netio_queue_user_impl_t __user_part;
+
+};
+
+
+/*
+ * Statistics counters for a specific cpu and device.
+ */
+struct tile_net_stats_t {
+ u32 rx_packets;
+ u32 rx_bytes;
+ u32 tx_packets;
+ u32 tx_bytes;
+};
+
+
+/*
+ * Info for a specific cpu and device.
+ *
+ * ISSUE: There is a "dev" pointer in "napi" as well.
+ */
+struct tile_net_cpu {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ struct tile_netio_queue queue;
+ /* Statistics. */
+ struct tile_net_stats_t stats;
+ /* ISSUE: Is this needed? */
+ bool napi_enabled;
+	/* True if this tile has successfully registered with the IPP. */
+ bool registered;
+ /* True if the link was down last time we tried to register. */
+ bool link_down;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct timer_list egress_timer;
+};
+
+
+/*
+ * Info for a specific device.
+ */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The actual egress queue. */
+ lepp_queue_t *epp_queue;
+ /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
+ spinlock_t cmd_lock;
+ /* Protects "epp_queue->comp_head". */
+ spinlock_t comp_lock;
+ /* The hypervisor handle for this interface. */
+ int hv_devhdl;
+ /* The intr bit mask that IDs this device. */
+ u32 intr_id;
+ /* True iff "tile_net_open_aux()" has succeeded. */
+ int partly_opened;
+ /* True iff "tile_net_open_inner()" has succeeded. */
+ int fully_opened;
+ /* Effective network cpus. */
+ struct cpumask network_cpus_map;
+ /* Number of network cpus. */
+ int network_cpus_count;
+ /* Credits per network cpu. */
+ int network_cpus_credits;
+ /* Network stats. */
+ struct net_device_stats stats;
+ /* For NetIO bringup retries. */
+ struct delayed_work retry_work;
+ /* Quick access to per cpu data. */
+ struct tile_net_cpu *cpu[NR_CPUS];
+};
+
+
+/*
+ * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
+ */
+static struct net_device *tile_net_devs[TILE_NET_DEVS];
+
+/*
+ * The "tile_net_cpu" structures for each device.
+ */
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
+
+
+/*
+ * True if "network_cpus" was specified.
+ */
+static bool network_cpus_used;
+
+/*
+ * The actual cpus in "network_cpus".
+ */
+static struct cpumask network_cpus_map;
+
+
+
+#ifdef TILE_NET_DEBUG
+/*
+ * printk with extra stuff.
+ *
+ * We print the CPU we're running in brackets.
+ */
+static void net_printk(char *fmt, ...)
+{
+ int i;
+ int len;
+ va_list args;
+ static char buf[256];
+
+ len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
+ va_start(args, fmt);
+ i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
+ va_end(args);
+ buf[255] = '\0';
+	pr_notice("%s", buf);
+}
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+/*
+ * Dump a packet.
+ */
+static void dump_packet(unsigned char *data, unsigned long length, char *s)
+{
+ unsigned long i;
+	static unsigned int count;
+	char buf[64];	/* one formatted line: "xxxxxxxx:" + 16 * " xx" + NUL */
+
+ pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
+ data, length, s, count++);
+
+ pr_info("\n");
+
+ for (i = 0; i < length; i++) {
+ if ((i & 0xf) == 0)
+ sprintf(buf, "%8.8lx:", i);
+ sprintf(buf + strlen(buf), " %2.2x", data[i]);
+ if ((i & 0xf) == 0xf || i == length - 1)
+ pr_info("%s\n", buf);
+ }
+}
+#endif
+
+
+/*
+ * Provide support for the __netio_fastio1() swint
+ * (see <hv/drv_xgbe_intf.h> for how it is used).
+ *
+ * The fastio swint2 call may clobber all the caller-saved registers.
+ * It rarely clobbers memory, but we allow for the possibility in
+ * the signature just to be on the safe side.
+ *
+ * Also, gcc doesn't seem to allow an input operand to be
+ * clobbered, so we fake it with dummy outputs.
+ *
+ * This function can't be static because of the way it is declared
+ * in the netio header.
+ */
+inline int __netio_fastio1(u32 fastio_index, u32 arg0)
+{
+ long result, clobber_r1, clobber_r10;
+ asm volatile("swint2"
+ : "=R00" (result),
+ "=R01" (clobber_r1), "=R10" (clobber_r10)
+ : "R10" (fastio_index), "R01" (arg0)
+ : "memory", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9",
+ "r11", "r12", "r13", "r14",
+ "r15", "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23", "r24",
+ "r25", "r26", "r27", "r28", "r29");
+ return result;
+}
+
+
+/*
+ * Provide a linux buffer to LIPP.
+ */
+static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
+ void *va, bool small)
+{
+ struct tile_netio_queue *queue = &info->queue;
+
+ /* Convert "va" and "small" to "linux_buffer_t". */
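+	/* (The PA is 128-byte aligned, so its low 7 bits are zero; shifting
+	 * right by 7 and then left by 1 leaves bit 0 free to carry "small".) */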
+ unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
+
+ __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
+}
+
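A note on the "linux_buffer_t" encoding used here and on the receive path
below: a 38-bit physical address whose low 7 bits are zero (the buffers are
128-byte aligned) is packed into 32 bits, with bit 0 carrying the
small/large flag. A minimal user-space sketch of the round trip, using a
hypothetical physical address purely for illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 128-byte-aligned physical address. */
        uint64_t cpa = 0x23456780ULL;
        unsigned int small = 1;

        /* Encode: keep bits [37:7] and put the flag in bit 0. */
        uint32_t buffer = ((uint32_t)(cpa >> 7) << 1) + small;

        /* Decode, as done when a packet or drained buffer comes back. */
        uint64_t cpa2 = ((uint64_t)(buffer >> 1)) << 7;
        unsigned int small2 = buffer & 1;

        assert(cpa2 == cpa && small2 == small);
        printf("buffer=0x%x cpa=0x%llx small=%u\n",
               buffer, (unsigned long long)cpa2, small2);
        return 0;
    }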
+
+/*
+ * Provide a linux buffer for LIPP.
+ */
+static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
+ bool small)
+{
+ /* ISSUE: What should we use here? */
+ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+
+ /* Round up to avoid "false sharing" with the last cache line. */
+ unsigned int buffer_size =
+ (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
+ CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
+
+ /*
+ * ISSUE: Since CPAs are 38 bits, and we can only encode the
+ * high 31 bits in a "linux_buffer_t", the low 7 bits must be
+ * zero, and thus, we must align the actual "va" mod 128.
+ */
+ const unsigned long align = 128;
+
+ struct sk_buff *skb;
+ void *va;
+
+ struct sk_buff **skb_ptr;
+
+ /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
+ /* and also "reserves" that many bytes. */
+ /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
+ int len = sizeof(*skb_ptr) + align + buffer_size;
+
+ while (1) {
+
+ /* Allocate (or fail). */
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb'. */
+ skb_reserve(skb, sizeof(*skb_ptr));
+
+ /* Make sure we are aligned. */
+ skb_reserve(skb, -(long)skb->data & (align - 1));
+
+ /* This address is given to IPP. */
+ va = skb->data;
+
+ if (small)
+ break;
+
+ /* ISSUE: This has never been observed! */
+ /* Large buffers must not span a huge page. */
+ if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
+ break;
+ pr_err("Leaking unaligned linux buffer at %p.\n", va);
+ }
+
+ /* Skip two bytes to satisfy LIPP assumptions. */
+ /* Note that this aligns IP on a 16 byte boundary. */
+ /* ISSUE: Do this when the packet arrives? */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ /* Save a back-pointer to 'skb'. */
+ skb_ptr = va - sizeof(*skb_ptr);
+ *skb_ptr = skb;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(skb->data, buffer_size);
+
+ /* Make sure "skb_ptr" has been flushed. */
+ __insn_mf();
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-coherent ingress buffer!");
+ }
+#endif
+#endif
+
+ /* Provide the new buffer. */
+ tile_net_provide_linux_buffer(info, va, small);
+
+ return true;
+}
+
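The allocation above leaves room for a back-pointer just below the
128-byte-aligned data address handed to the IPP; the receive and drain
paths later recover the skb from that slot. A hedged user-space sketch of
the same layout trick (the names here are illustrative, not the driver's
API):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BUF_ALIGN 128

    int main(void)
    {
        size_t data_size = 256;

        /* Room for the back-pointer, alignment slack, and the data. */
        unsigned char *raw = malloc(sizeof(void *) + BUF_ALIGN + data_size);
        uintptr_t p;
        unsigned char *va;

        if (!raw)
            return 1;

        /* Skip the back-pointer slot, then round up to the alignment. */
        p = (uintptr_t)raw + sizeof(void *);
        va = (unsigned char *)((p + BUF_ALIGN - 1) & ~(uintptr_t)(BUF_ALIGN - 1));

        /* Store the back-pointer just below the aligned address. */
        *(void **)(va - sizeof(void *)) = raw;

        /* Later, "va" alone is enough to find the original allocation. */
        printf("recovered %p (allocated %p)\n",
               *(void **)(va - sizeof(void *)), (void *)raw);
        free(raw);
        return 0;
    }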
+
+/*
+ * Provide linux buffers for LIPP.
+ */
+static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
+{
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Could not provide a linux buffer to LIPP.\n");
+}
+
+
+/*
+ * Grab some LEPP completions, and store them in "comps", of size
+ * "comps_size", and return the number of completions which were
+ * stored, so the caller can free them.
+ *
+ * If "pending" is not NULL, it will be set to true if there might
+ * still be some pending completions caused by this tile, else false.
+ */
+static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
+ struct sk_buff *comps[],
+ unsigned int comps_size,
+ bool *pending)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ unsigned int n = 0;
+
+ unsigned int comp_head;
+ unsigned int comp_busy;
+ unsigned int comp_tail;
+
+ spin_lock(&priv->comp_lock);
+
+ comp_head = eq->comp_head;
+ comp_busy = eq->comp_busy;
+ comp_tail = eq->comp_tail;
+
+ while (comp_head != comp_busy && n < comps_size) {
+ comps[n++] = eq->comps[comp_head];
+ LEPP_QINC(comp_head);
+ }
+
+ if (pending != NULL)
+ *pending = (comp_head != comp_tail);
+
+ eq->comp_head = comp_head;
+
+ spin_unlock(&priv->comp_lock);
+
+ return n;
+}
+
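For reference, the completion ring managed above has three indices: entries
between comp_head and comp_busy have finished and may be freed, while
entries between comp_busy and comp_tail are still owned by the shim. A
small model of the grab loop, with made-up index values for illustration:

    #include <stdio.h>

    #define QLEN 8
    #define QINC(i) ((i) = ((i) + 1) % QLEN)

    int main(void)
    {
        /* Hypothetical snapshot of the ring indices. */
        unsigned int comp_head = 2, comp_busy = 5, comp_tail = 7;
        unsigned int n = 0;

        /* Grab everything that has completed (head..busy). */
        while (comp_head != comp_busy) {
            /* The real code copies eq->comps[comp_head] out here. */
            n++;
            QINC(comp_head);
        }

        printf("grabbed %u completions, %s pending\n",
               n, (comp_head != comp_tail) ? "some" : "none");
        return 0;
    }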
+
+/*
+ * Make sure the egress timer is scheduled.
+ *
+ * Note that we use "schedule if not scheduled" logic instead of the more
+ * obvious "reschedule" logic, because "reschedule" is fairly expensive.
+ */
+static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
+{
+ if (!info->egress_timer_scheduled) {
+ mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+
+/*
+ * The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected (on behalf of any tile).
+ *
+ * ISSUE: Realistically, will the timer ever stop scheduling itself?
+ *
+ * ISSUE: This timer is almost never actually needed, so just use a global
+ * timer that can run on any tile.
+ *
+ * ISSUE: Maybe instead track number of expected completions, and free
+ * only that many, resetting to zero if "pending" is ever false.
+ */
+static void tile_net_handle_egress_timer(unsigned long arg)
+{
+ struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
+ struct net_device *dev = info->napi.dev;
+
+ struct sk_buff *olds[32];
+ unsigned int wanted = 32;
+ unsigned int i, nolds = 0;
+ bool pending;
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
+
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer(info);
+}
+
+
+#ifdef IGNORE_DUP_ACKS
+
+/*
+ * Help detect "duplicate" ACKs. These are sequential packets (for a
+ * given flow) which are exactly 66 bytes long, sharing everything but
+ * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
+ * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
+ * +N, and the Tstamps are usually identical.
+ *
+ * NOTE: Apparently truly duplicate acks (with identical "ack" values),
+ * should not be collapsed, as they are used for some kind of flow control.
+ */
+static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+{
+ int i;
+
+ unsigned long long ignorable = 0;
+
+ /* Identification. */
+ ignorable |= (1ULL << 0x12);
+ ignorable |= (1ULL << 0x13);
+
+ /* Header checksum. */
+ ignorable |= (1ULL << 0x18);
+ ignorable |= (1ULL << 0x19);
+
+ /* ACK. */
+ ignorable |= (1ULL << 0x2a);
+ ignorable |= (1ULL << 0x2b);
+ ignorable |= (1ULL << 0x2c);
+ ignorable |= (1ULL << 0x2d);
+
+ /* WinSize. */
+ ignorable |= (1ULL << 0x30);
+ ignorable |= (1ULL << 0x31);
+
+ /* Checksum. */
+ ignorable |= (1ULL << 0x32);
+ ignorable |= (1ULL << 0x33);
+
+ for (i = 0; i < len; i++, ignorable >>= 1) {
+
+ if ((ignorable & 1) || (s1[i] == s2[i]))
+ continue;
+
+#ifdef TILE_NET_DEBUG
+ /* HACK: Mention non-timestamp diffs. */
+ if (i < 0x38 && i != 0x2f &&
+ net_ratelimit())
+ pr_info("Diff at 0x%x\n", i);
+#endif
+
+ return false;
+ }
+
+#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
+ /* HACK: Do not suppress truly duplicate ACKs. */
+ /* ISSUE: Is this actually necessary or helpful? */
+ if (s1[0x2a] == s2[0x2a] &&
+ s1[0x2b] == s2[0x2b] &&
+ s1[0x2c] == s2[0x2c] &&
+ s1[0x2d] == s2[0x2d]) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+#endif
+
+
+
+/*
+ * Like "tile_net_handle_packets()", but just discard packets.
+ */
+static void tile_net_discard_packets(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ while (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write) {
+
+ int index = qup->__packet_receive_read;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)
+ ((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+ }
+}
+
+
+/*
+ * Handle the next packet. Return true if "processed", false if "filtered".
+ */
+static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
+{
+ struct net_device *dev = info->napi.dev;
+
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+ struct tile_net_stats_t *stats = &info->stats;
+
+ int filter;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+
+ /* Extract the packet size. */
+ unsigned long len =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Extract "small" (vs "large"). */
+ bool small = ((buffer & 1) != 0);
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf = va + NET_IP_ALIGN;
+
+#ifdef IGNORE_DUP_ACKS
+
+ static int other;
+ static int final;
+ static int keep;
+ static int skip;
+
+#endif
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(buf, len);
+
+ /* ISSUE: Is this needed? */
+ dev->last_rx = jiffies;
+
+#ifdef TILE_NET_DUMP_PACKETS
+ dump_packet(buf, len, "rx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+#ifdef TILE_NET_VERIFY_INGRESS
+ if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
+ /*
+ * FIXME: This complains about UDP packets
+ * with a "zero" checksum (bug 6624).
+ */
+#ifdef TILE_NET_PANIC_ON_BAD
+ dump_packet(buf, len, "rx");
+ panic("Bad L4 checksum.");
+#else
+ pr_warning("Bad L4 checksum on %d byte packet.\n", len);
+#endif
+ }
+ if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ dump_packet(buf, len, "rx");
+ panic("Bad L3 checksum.");
+ }
+ switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
+ case NETIO_PKT_STATUS_OVERSIZE:
+ if (len >= 64) {
+ dump_packet(buf, len, "rx");
+ panic("Unexpected OVERSIZE.");
+ }
+ break;
+ case NETIO_PKT_STATUS_BAD:
+#ifdef TILE_NET_PANIC_ON_BAD
+ dump_packet(buf, len, "rx");
+ panic("Unexpected BAD packet.");
+#else
+ pr_warning("Unexpected BAD %d byte packet.\n", len);
+#endif
+ }
+#endif
+
+ filter = 0;
+
+ if (!(dev->flags & IFF_UP)) {
+ /* Filter packets received before we're up. */
+ filter = 1;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /*
+ * FIXME: Implement HW multicast filter.
+ */
+ if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+ /* Filter packets not for our address. */
+ const u8 *mine = dev->dev_addr;
+ filter = compare_ether_addr(mine, buf);
+ }
+ }
+
+#ifdef IGNORE_DUP_ACKS
+
+ if (len != 66) {
+ /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
+
+ other++;
+
+ } else if (index2 ==
+ qsp->__packet_receive_queue.__packet_write) {
+
+ final++;
+
+ } else {
+
+ netio_pkt_t *pkt2 = (netio_pkt_t *)
+ ((unsigned long) &qsp[1] + index2);
+
+ netio_pkt_metadata_t *metadata2 =
+ NETIO_PKT_METADATA(pkt2);
+
+ /* Extract the packet size. */
+ unsigned long len2 =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
+ if (len2 == 66 &&
+ NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
+ NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer2 = pkt2->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va2 =
+ __va((phys_addr_t)(buffer2 >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf2 = va2 + NET_IP_ALIGN;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(buf2, len2);
+
+ if (is_dup_ack(buf, buf2, len)) {
+ skip++;
+ filter = 1;
+ } else {
+ keep++;
+ }
+ }
+ }
+
+ if (net_ratelimit())
+ pr_info("Other %d Final %d Keep %d Skip %d.\n",
+ other, final, keep, skip);
+
+#endif
+
+ if (filter) {
+
+ /* ISSUE: Update "drop" statistics? */
+
+ tile_net_provide_linux_buffer(info, va, small);
+
+ } else {
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != buf)
+ panic("Corrupt linux buffer from LIPP! "
+ "VA=%p, skb=%p, skb->data=%p\n",
+ va, skb, skb->data);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ /* NOTE: This call also sets "skb->dev = dev". */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* ISSUE: Discard corrupt packets? */
+ /* ISSUE: Discard packets with bad checksums? */
+
+ /* Avoid recomputing TCP/UDP checksums. */
+ if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ if (small)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+ }
+
+ /* Return a batch of credits once "interval" packets have been consumed. */
+ if (--qup->__receive_credit_remaining == 0) {
+ u32 interval = qup->__receive_credit_interval;
+ qup->__receive_credit_remaining = interval;
+ __netio_fastio_return_credits(qup->__fastio_index, interval);
+ }
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+
+ return !filter;
+}
+
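The credit handling at the end of tile_net_poll_aux() batches its returns:
the per-queue counter is decremented once per packet, and only when it
reaches zero is a whole interval's worth of credits handed back in a single
fastio call. A rough model of that counting, assuming the default interval
of 4:

    #include <stdio.h>

    int main(void)
    {
        unsigned int interval = 4;         /* __receive_credit_interval */
        unsigned int remaining = interval; /* __receive_credit_remaining */
        unsigned int returned = 0;
        int pkt;

        for (pkt = 1; pkt <= 10; pkt++) {
            if (--remaining == 0) {
                remaining = interval;
                /* One __netio_fastio_return_credits() call. */
                returned += interval;
                printf("after packet %d: %u credits returned\n",
                       pkt, returned);
            }
        }
        return 0;
    }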
+
+/*
+ * Handle some packets for the given device on the current CPU.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ unsigned int work = 0;
+
+ while (1) {
+ int index = qup->__packet_receive_read;
+ if (index == qsp->__packet_receive_queue.__packet_write)
+ break;
+
+ if (tile_net_poll_aux(info, index)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+
+ napi_complete(&info->napi);
+
+ /* Re-enable hypervisor interrupts. */
+ enable_percpu_irq(priv->intr_id);
+
+ /* HACK: Avoid the "rotting packet" problem. */
+ if (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write)
+ napi_schedule(&info->napi);
+
+ /* ISSUE: Handle completions? */
+
+done:
+
+ tile_net_provide_needed_buffers(info);
+
+ return work;
+}
+
+
+/*
+ * Handle an ingress interrupt for the given device on the current cpu.
+ */
+static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable hypervisor interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ napi_schedule(&info->napi);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * One time initialization per interface.
+ */
+static int tile_net_open_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ int ret;
+ int dummy;
+ unsigned int epp_lotar;
+
+ /*
+ * Find out where EPP memory should be homed.
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0) {
+ pr_err("could not read epp_shm_queue lotar.\n");
+ return -EIO;
+ }
+
+ /*
+ * Home the page on the EPP.
+ */
+ {
+ int epp_home = hv_lotar_to_cpu(epp_lotar);
+ struct page *page = virt_to_page(priv->epp_queue);
+ homecache_change_page_home(page, 0, epp_home);
+ }
+
+ /*
+ * Register the EPP shared memory queue.
+ */
+ {
+ netio_ipp_address_t ea = {
+ .va = 0,
+ .pa = __pa(priv->epp_queue),
+ .pte = hv_pte(0),
+ .size = PAGE_SIZE,
+ };
+ ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
+ ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&ea,
+ sizeof(ea),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ /*
+ * Start LIPP/LEPP.
+ */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
+ pr_warning("Failed to start LIPP/LEPP.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Register with hypervisor on each CPU.
+ *
+ * Strangely, this function does important things even if it "fails",
+ * which is especially common if the link is not up yet. Hopefully
+ * these things are all "harmless" if done twice!
+ */
+static void tile_net_register(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+
+ struct tile_netio_queue *queue;
+
+ /* Only network cpus can receive packets. */
+ int queue_id =
+ cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
+
+ netio_input_config_t config = {
+ .flags = 0,
+ .num_receive_packets = priv->network_cpus_credits,
+ .queue_id = queue_id
+ };
+
+ int ret = 0;
+ netio_queue_impl_t *queuep;
+
+ PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
+
+ if (!strcmp(dev->name, "xgbe0"))
+ info = &__get_cpu_var(hv_xgbe0);
+ else if (!strcmp(dev->name, "xgbe1"))
+ info = &__get_cpu_var(hv_xgbe1);
+ else if (!strcmp(dev->name, "gbe0"))
+ info = &__get_cpu_var(hv_gbe0);
+ else if (!strcmp(dev->name, "gbe1"))
+ info = &__get_cpu_var(hv_gbe1);
+ else
+ BUG();
+
+ /* Initialize the egress timer. */
+ init_timer(&info->egress_timer);
+ info->egress_timer.data = (long)info;
+ info->egress_timer.function = tile_net_handle_egress_timer;
+
+ priv->cpu[my_cpu] = info;
+
+ /*
+ * Register ourselves with the IPP.
+ */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&config,
+ sizeof(netio_input_config_t),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ if (ret < 0) {
+ printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
+ " failure %d\n", ret);
+ info->link_down = (ret == NETIO_LINK_DOWN);
+ return;
+ }
+
+ /*
+ * Get the pointer to our queue's system part.
+ */
+
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queuep,
+ sizeof(netio_queue_impl_t *),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ PDEBUG("queuep %p\n", queuep);
+ if (ret <= 0) {
+ /* ISSUE: Shouldn't this be a fatal error? */
+ pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
+ return;
+ }
+
+ queue = &info->queue;
+
+ queue->__system_part = queuep;
+
+ memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
+
+ /* This is traditionally "config.num_receive_packets / 2". */
+ queue->__user_part.__receive_credit_interval = 4;
+ queue->__user_part.__receive_credit_remaining =
+ queue->__user_part.__receive_credit_interval;
+
+ /*
+ * Get a fastio index from the hypervisor.
+ * ISSUE: Shouldn't this check the result?
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queue->__user_part.__fastio_index,
+ sizeof(queue->__user_part.__fastio_index),
+ NETIO_IPP_GET_FASTIO_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
+
+ netif_napi_add(dev, &info->napi, tile_net_poll, 64);
+
+ /* Now we are registered. */
+ info->registered = true;
+}
+
+
+/*
+ * Unregister with hypervisor on each CPU.
+ */
+static void tile_net_unregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ int ret = 0;
+ int dummy = 0;
+
+ /* Do nothing if never registered. */
+ if (info == NULL)
+ return;
+
+ /* Do nothing if already unregistered. */
+ if (!info->registered)
+ return;
+
+ /*
+ * Unregister ourselves with LIPP.
+ */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
+ PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
+ ret);
+ if (ret < 0) {
+ /* FIXME: Just panic? */
+ pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
+ " failure %d\n", ret);
+ }
+
+ /*
+ * Discard all packets still in our NetIO queue. Hopefully,
+ * once the unregister call is complete, there will be no
+ * packets still in flight on the IDN.
+ */
+ tile_net_discard_packets(dev);
+
+ /* Reset state. */
+ info->num_needed_small_buffers = 0;
+ info->num_needed_large_buffers = 0;
+
+ /* Cancel egress timer. */
+ del_timer(&info->egress_timer);
+ info->egress_timer_scheduled = false;
+
+ netif_napi_del(&info->napi);
+
+ /* Now we are unregistered. */
+ info->registered = false;
+}
+
+
+/*
+ * Helper function for "tile_net_stop()".
+ *
+ * Also used to handle registration failure in "tile_net_open_inner()",
+ * when "fully_opened" is known to be false, and the various extra
+ * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
+ * simpler if we could just call "tile_net_stop()" anyway.
+ */
+static void tile_net_stop_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ int dummy = 0;
+
+ /* Unregister all tiles, so LIPP will stop delivering packets. */
+ on_each_cpu(tile_net_unregister, (void *)dev, 1);
+
+ /* Stop LIPP/LEPP. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
+ panic("Failed to stop LIPP/LEPP!\n");
+
+ priv->partly_opened = 0;
+}
+
+
+/*
+ * Disable ingress interrupts for the given device on the current cpu.
+ */
+static void tile_net_disable_intr(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable hypervisor interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Disable NAPI if needed. */
+ if (info != NULL && info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+}
+
+
+/*
+ * Enable ingress interrupts for the given device on the current cpu.
+ */
+static void tile_net_enable_intr(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Enable hypervisor interrupt. */
+ enable_percpu_irq(priv->intr_id);
+
+ /* Enable NAPI. */
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+}
+
+
+/*
+ * tile_net_open_inner does most of the work of bringing up the interface.
+ * It's called from tile_net_open(), and also from tile_net_retry_open().
+ * The return value is 0 if the interface was brought up, < 0 if
+ * tile_net_open() should return the return value as an error, and > 0 if
+ * tile_net_open() should return success and schedule a work item to
+ * periodically retry the bringup.
+ */
+static int tile_net_open_inner(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+ struct tile_netio_queue *queue;
+ unsigned int irq;
+ int i;
+
+ /*
+ * First try to register just on the local CPU, and handle any
+ * semi-expected "link down" failure specially. Note that we
+ * do NOT call "tile_net_stop_aux()", unlike below.
+ */
+ tile_net_register(dev);
+ info = priv->cpu[my_cpu];
+ if (!info->registered) {
+ if (info->link_down)
+ return 1;
+ return -EAGAIN;
+ }
+
+ /*
+ * Now register everywhere else. If any registration fails,
+ * even for "link down" (which might not be possible), we
+ * clean up using "tile_net_stop_aux()".
+ */
+ smp_call_function(tile_net_register, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ if (!priv->cpu[i]->registered) {
+ tile_net_stop_aux(dev);
+ return -EAGAIN;
+ }
+ }
+
+ queue = &info->queue;
+
+ /*
+ * Set the device intr bit mask.
+ * The tile_net_register above sets per tile __intr_id.
+ */
+ priv->intr_id = queue->__system_part->__intr_id;
+ BUG_ON(!priv->intr_id);
+
+ /*
+ * Register the device interrupt handler.
+ * The interrupt bit mask should have exactly one bit set; __ffs()
+ * returns its index, which is used as the IRQ number to register.
+ */
+ irq = __ffs(priv->intr_id);
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
+ BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
+ 0, dev->name, (void *)dev) != 0);
+
+ /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
+
+ if (!priv->fully_opened) {
+
+ int dummy = 0;
+
+ /* Allocate initial buffers. */
+
+ int max_buffers =
+ priv->network_cpus_count * priv->network_cpus_credits;
+
+ info->num_needed_small_buffers =
+ min(LIPP_SMALL_BUFFERS, max_buffers);
+
+ info->num_needed_large_buffers =
+ min(LIPP_LARGE_BUFFERS, max_buffers);
+
+ tile_net_provide_needed_buffers(info);
+
+ if (info->num_needed_small_buffers != 0 ||
+ info->num_needed_large_buffers != 0)
+ panic("Insufficient memory for buffer stack!");
+
+ /* Start LIPP/LEPP and activate "ingress" at the shim. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
+ panic("Failed to activate the LIPP Shim!\n");
+
+ priv->fully_opened = 1;
+ }
+
+ /* On each tile, enable the hypervisor to trigger interrupts. */
+ /* ISSUE: Do this before starting LIPP/LEPP? */
+ on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
+
+ /* Start our transmit queue. */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+/*
+ * Called periodically to retry bringing up the NetIO interface,
+ * if it doesn't come up cleanly during tile_net_open().
+ */
+static void tile_net_open_retry(struct work_struct *w)
+{
+ struct delayed_work *dw =
+ container_of(w, struct delayed_work, work);
+
+ struct tile_net_priv *priv =
+ container_of(dw, struct tile_net_priv, retry_work);
+
+ /*
+ * Try to bring the NetIO interface up. If it fails, reschedule
+ * ourselves to try again later; otherwise, tell Linux we now have
+ * a working link. ISSUE: What if the return value is negative?
+ */
+ if (tile_net_open_inner(priv->dev))
+ schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
+ else
+ netif_carrier_on(priv->dev);
+}
+
+
+/*
+ * Called when a network interface is made active.
+ *
+ * Returns 0 on success, negative value on failure.
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ *
+ * If the actual link is not available yet, then we tell Linux that
+ * we have no carrier, and we keep checking until the link comes up.
+ */
+static int tile_net_open(struct net_device *dev)
+{
+ int ret = 0;
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /*
+ * We rely on priv->partly_opened to tell us if this is the
+ * first time this interface is being brought up. If it is
+ * set, the IPP was already initialized and should not be
+ * initialized again.
+ */
+ if (!priv->partly_opened) {
+
+ int count;
+ int credits;
+
+ /* Initialize LIPP/LEPP, and start the Shim. */
+ ret = tile_net_open_aux(dev);
+ if (ret < 0) {
+ pr_err("tile_net_open_aux failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Analyze the network cpus. */
+
+ if (network_cpus_used)
+ cpumask_copy(&priv->network_cpus_map,
+ &network_cpus_map);
+ else
+ cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
+
+
+ count = cpumask_weight(&priv->network_cpus_map);
+
+ /* Share the large buffers across the cpus (rounded to even), with a minimum of 16. */
+ credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
+
+ /* Apply "GBE" max limit. */
+ /* ISSUE: Use higher limit for XGBE? */
+ credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
+
+ priv->network_cpus_count = count;
+ priv->network_cpus_credits = credits;
+
+#ifdef TILE_NET_DEBUG
+ pr_info("Using %d network cpus, with %d credits each\n",
+ priv->network_cpus_count, priv->network_cpus_credits);
+#endif
+
+ priv->partly_opened = 1;
+ }
+
+ /*
+ * Attempt to bring up the link.
+ */
+ ret = tile_net_open_inner(dev);
+ if (ret <= 0) {
+ if (ret == 0)
+ netif_carrier_on(dev);
+ return ret;
+ }
+
+ /*
+ * We were unable to bring up the NetIO interface, but we want to
+ * try again in a little bit. Tell Linux that we have no carrier
+ * so it doesn't try to use the interface before the link comes up
+ * and then remember to try again later.
+ */
+ netif_carrier_off(dev);
+ schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
+
+ return 0;
+}
+
+
+/*
+ * Disables a network interface.
+ *
+ * Returns 0, this is not allowed to fail.
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ *
+ * ISSUE: Can this be called while "tile_net_poll()" is running?
+ */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ bool pending = true;
+
+ PDEBUG("tile_net_stop()\n");
+
+ /* ISSUE: Only needed if not yet fully open. */
+ cancel_delayed_work_sync(&priv->retry_work);
+
+ /* Can't transmit any more. */
+ netif_stop_queue(dev);
+
+ /*
+ * Disable hypervisor interrupts on each tile.
+ */
+ on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
+
+ /*
+ * Unregister the interrupt handler.
+ * The interrupt bit mask should have exactly one bit set; __ffs()
+ * returns its index, which is the IRQ number to free.
+ */
+ if (priv->intr_id)
+ free_irq(__ffs(priv->intr_id), dev);
+
+ /*
+ * Drain all the LIPP buffers.
+ */
+
+ while (true) {
+ int buffer;
+
+ /* NOTE: This should never fail. */
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
+ sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
+ break;
+
+ /* Stop when done. */
+ if (buffer == 0)
+ break;
+
+ {
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+ }
+ }
+
+ /* Stop LIPP/LEPP. */
+ tile_net_stop_aux(dev);
+
+
+ priv->fully_opened = 0;
+
+
+ /*
+ * XXX: ISSUE: It appears that, in practice anyway, by the
+ * time we get here, there are no pending completions.
+ */
+ while (pending) {
+
+ struct sk_buff *olds[32];
+ unsigned int wanted = 32;
+ unsigned int i, nolds = 0;
+
+ nolds = tile_net_lepp_grab_comps(dev, olds,
+ wanted, &pending);
+
+ /* ISSUE: We have never actually seen this debug spew. */
+ if (nolds != 0)
+ pr_info("During tile_net_stop(), grabbed %d comps.\n",
+ nolds);
+
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+ }
+
+
+ /* Wipe the EPP queue. */
+ memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
+
+ /* Evict the EPP queue. */
+ finv_buffer(priv->epp_queue, PAGE_SIZE);
+
+ return 0;
+}
+
+
+/*
+ * Prepare the "frags" info for the resulting LEPP command.
+ *
+ * If needed, flush the memory used by the frags.
+ */
+static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ phys_addr_t cpa;
+
+ if (b_len != 0) {
+
+ if (!hash_default)
+ finv_buffer_remote(b_data, b_len);
+
+ cpa = __pa(b_data);
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = b_len;
+ frags[n].hash_for_home = hash_default;
+ n++;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+
+ skb_frag_t *f = &sh->frags[i];
+ unsigned long pfn = page_to_pfn(f->page);
+
+ /* FIXME: Compute "hash_for_home" properly. */
+ /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
+ int hash_for_home = hash_default;
+
+ /* FIXME: Hmmm. */
+ if (!hash_default) {
+ void *va = pfn_to_kaddr(pfn) + f->page_offset;
+ BUG_ON(PageHighMem(f->page));
+ finv_buffer_remote(va, f->size);
+ }
+
+ cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = f->size;
+ frags[n].hash_for_home = hash_for_home;
+ n++;
+ }
+
+ return n;
+}
+
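The frag records built above split each physical address into 32-bit halves
(cpa_lo/cpa_hi) for the LEPP command. A trivial check of that split and its
reassembly, using a hypothetical address wider than 32 bits:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical physical address spanning more than 32 bits. */
        uint64_t cpa = 0x23ffff1000ULL;

        uint32_t cpa_lo = (uint32_t)cpa;
        uint32_t cpa_hi = (uint32_t)(cpa >> 32);

        /* The shim reassembles the full address from the two halves. */
        assert((((uint64_t)cpa_hi << 32) | cpa_lo) == cpa);
        return 0;
    }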
+
+/*
+ * This function takes "skb", consisting of a header template and a
+ * payload, and hands it to LEPP, to emit as one or more segments,
+ * each consisting of a possibly modified header, plus a piece of the
+ * payload, via a process known as "tcp segmentation offload".
+ *
+ * Usually, "data" will contain the header template, of size "sh_len",
+ * and "sh->frags" will contain "skb->data_len" bytes of payload, and
+ * there will be "sh->gso_segs" segments.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ *
+ * In theory, "sh->nr_frags" could be 3, but in practice, it seems
+ * that this will never actually happen.
+ *
+ * See "emulate_large_send_offload()" for some reference code, which
+ * does not handle checksumming.
+ *
+ * ISSUE: How do we make sure that high memory DMA does not migrate?
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned char *data = skb->data;
+
+ /* The ip header follows the ethernet header. */
+ struct iphdr *ih = ip_hdr(skb);
+ unsigned int ih_len = ih->ihl * 4;
+
+ /* Note that "nh == ih", by definition. */
+ unsigned char *nh = skb_network_header(skb);
+ unsigned int eh_len = nh - data;
+
+ /* The tcp header follows the ip header. */
+ struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
+ unsigned int th_len = th->doff * 4;
+
+ /* The total number of header bytes. */
+ /* NOTE: This may be less than skb_headlen(skb). */
+ unsigned int sh_len = eh_len + ih_len + th_len;
+
+ /* The number of payload bytes at "skb->data + sh_len". */
+ /* This is non-zero for sendfile() without HIGHDMA. */
+ unsigned int b_len = skb_headlen(skb) - sh_len;
+
+ /* The total number of payload bytes. */
+ unsigned int d_len = b_len + skb->data_len;
+
+ /* The maximum payload size. */
+ unsigned int p_len = sh->gso_size;
+
+ /* The total number of segments. */
+ unsigned int num_segs = sh->gso_segs;
+
+ /* The temporary copy of the command. */
+ u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
+ lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
+
+ /* Analyze the "frags". */
+ unsigned int num_frags =
+ tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
+
+ /* The size of the command, including frags and header. */
+ size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
+
+ /* The command header. */
+ lepp_tso_cmd_t cmd_init = {
+ .tso = true,
+ .header_size = sh_len,
+ .ip_offset = eh_len,
+ .tcp_offset = eh_len + ih_len,
+ .payload_size = p_len,
+ .num_frags = num_frags,
+ };
+
+ unsigned long irqflags;
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ struct sk_buff *olds[4];
+ unsigned int wanted = 4;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+ unsigned int free_slots;
+
+
+ /* Paranoia. */
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
+ BUG_ON(ih->protocol != IPPROTO_TCP);
+ BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
+ BUG_ON(num_frags > LEPP_MAX_FRAGS);
+ /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
+ BUG_ON(num_segs <= 1);
+
+
+ /* Finish preparing the command. */
+
+ /* Copy the command header. */
+ *cmd = cmd_init;
+
+ /* Copy the "header". */
+ memcpy(&cmd->frags[num_frags], data, sh_len);
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the command. */
+
+ spin_lock_irqsave(&priv->cmd_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ free_slots = lepp_num_free_comp_slots(eq);
+ if (free_slots < 1) {
+spin:
+ nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
+ wanted - nolds, NULL);
+ if (lepp_num_free_comp_slots(eq) < 1)
+ goto spin;
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* NOTE: The "gotos" below are untested. */
+
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto spin;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto spin;
+ }
+
+ /* Copy the command. */
+ memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
+
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Update stats. */
+ stats->tx_packets += num_segs;
+ stats->tx_bytes += (num_segs * sh_len) + d_len;
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
+ */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ unsigned long irqflags;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+
+ unsigned int csum_start = skb->csum_start - skb_headroom(skb);
+
+ lepp_frag_t frags[LEPP_MAX_FRAGS];
+
+ unsigned int num_frags;
+
+ lepp_queue_t *eq = priv->epp_queue;
+
+ struct sk_buff *olds[4];
+ unsigned int wanted = 4;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_size = sizeof(lepp_cmd_t);
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+ lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+
+ unsigned int free_slots;
+
+
+ /*
+ * This is paranoia, since we think that if the link doesn't come
+ * up, telling Linux we have no carrier will keep it from trying
+ * to transmit. If it does, though, we can't execute this routine,
+ * since data structures we depend on aren't set up yet.
+ */
+ if (!info->registered)
+ return NETDEV_TX_BUSY;
+
+
+ /* Save the timestamp. */
+ dev->trans_start = jiffies;
+
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-coherent egress buffer!");
+ }
+#endif
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+ /* ISSUE: Does not dump the "frags". */
+ dump_packet(data, skb_headlen(skb), "tx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+
+ if (sh->gso_size != 0)
+ return tile_net_tx_tso(skb, dev);
+
+
+ /* Prepare the commands. */
+
+ num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ for (i = 0; i < num_frags; i++) {
+
+ bool final = (i == num_frags - 1);
+
+ lepp_cmd_t cmd = {
+ .cpa_lo = frags[i].cpa_lo,
+ .cpa_hi = frags[i].cpa_hi,
+ .length = frags[i].length,
+ .hash_for_home = frags[i].hash_for_home,
+ .send_completion = final,
+ .end_of_packet = final
+ };
+
+ if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
+ cmd.compute_checksum = 1;
+ cmd.checksum_data.bits.start_byte = csum_start;
+ cmd.checksum_data.bits.count = len - csum_start;
+ cmd.checksum_data.bits.destination_byte =
+ csum_start + skb->csum_offset;
+ }
+
+ cmds[i] = cmd;
+ }
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the commands. */
+
+ spin_lock_irqsave(&priv->cmd_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ free_slots = lepp_num_free_comp_slots(eq);
+ if (free_slots < 1) {
+spin:
+ nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
+ wanted - nolds, NULL);
+ if (lepp_num_free_comp_slots(eq) < 1)
+ goto spin;
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* NOTE: The "gotos" below are untested. */
+
+ /* Copy the commands, or fail. */
+ for (i = 0; i < num_frags; i++) {
+
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto spin;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto spin;
+ }
+
+ /* Copy the command. */
+ *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+ }
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
+
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ stats->tx_packets++;
+ stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ PDEBUG("tile_net_tx_timeout()\n");
+ PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
+ jiffies - dev->trans_start);
+
+ /* XXX: ISSUE: This doesn't seem useful for us. */
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Ioctl commands.
+ */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+
+/*
+ * Get System Network Statistics.
+ *
+ * Returns the address of the device statistics structure.
+ */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ u32 rx_packets = 0;
+ u32 tx_packets = 0;
+ u32 rx_bytes = 0;
+ u32 tx_bytes = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ if (priv->cpu[i]) {
+ rx_packets += priv->cpu[i]->stats.rx_packets;
+ rx_bytes += priv->cpu[i]->stats.rx_bytes;
+ tx_packets += priv->cpu[i]->stats.tx_packets;
+ tx_bytes += priv->cpu[i]->stats.tx_bytes;
+ }
+ }
+
+ priv->stats.rx_packets = rx_packets;
+ priv->stats.rx_bytes = rx_bytes;
+ priv->stats.tx_packets = tx_packets;
+ priv->stats.tx_bytes = tx_bytes;
+
+ return &priv->stats;
+}
+
+
+/*
+ * Change the "mtu".
+ *
+ * The "change_mtu" method is usually not needed.
+ * If you need it, it must be like this.
+ */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ PDEBUG("tile_net_change_mtu()\n");
+
+ /* Check ranges. */
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+
+ /* Accept the value. */
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+
+/*
+ * Change the Ethernet Address of the NIC.
+ *
+ * The hypervisor driver does not support changing the MAC address. However,
+ * the IPP does not do anything with the MAC address, so the address which
+ * gets used on outgoing packets, and which is accepted on incoming packets,
+ * is completely up to the NetIO program or kernel driver which is actually
+ * handling them.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ /* ISSUE: Note that "dev_addr" is now a pointer. */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+
+/*
+ * Obtain the MAC address from the hypervisor.
+ * This must be done before opening the device.
+ */
+static int tile_net_get_mac(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ char hv_dev_name[32];
+ int len;
+
+ __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
+
+ int ret;
+
+ /* For example, "xgbe0". */
+ strcpy(hv_dev_name, dev->name);
+ len = strlen(hv_dev_name);
+
+ /* For example, "xgbe/0". */
+ hv_dev_name[len] = hv_dev_name[len - 1];
+ hv_dev_name[len - 1] = '/';
+ len++;
+
+ /* For example, "xgbe/0/native_hash". */
+ strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
+
+ /* Get the hypervisor handle for this device. */
+ priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
+ PDEBUG("hv_dev_open(%s) returned %d %p\n",
+ hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
+ if (priv->hv_devhdl < 0) {
+ if (priv->hv_devhdl == HV_ENODEV)
+ printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
+ hv_dev_name);
+ else
+ printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
+ hv_dev_name, priv->hv_devhdl);
+ return -1;
+ }
+
+ /*
+ * Read the hardware address from the hypervisor.
+ * ISSUE: Note that "dev_addr" is now a pointer.
+ */
+ offset.bits.class = NETIO_PARAM;
+ offset.bits.addr = NETIO_PARAM_MAC;
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)dev->dev_addr, dev->addr_len,
+ offset.word);
+ PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
+ if (ret <= 0) {
+ printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
+ dev->name);
+ /*
+ * Since the device is configured by the hypervisor but we
+ * can't get its MAC address, we are most likely running
+ * the simulator, so let's generate a random MAC address.
+ */
+ random_ether_addr(dev->dev_addr);
+ }
+
+ return 0;
+}
+
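tile_net_get_mac() above builds the hypervisor device path by splitting the
trailing digit off the Linux name and appending the hashing suffix, so
"xgbe0" becomes "xgbe/0/native_hash". A standalone sketch of that mangling
(the helper name is made up for illustration):

    #include <stdio.h>
    #include <string.h>

    static void build_hv_name(const char *devname, int hash_default,
                              char *out, size_t outlen)
    {
        size_t len = strlen(devname);

        /* "xgbe0" -> "xgbe" + "/" + "0" + "/native_hash". */
        snprintf(out, outlen, "%.*s/%c%s",
                 (int)(len - 1), devname, devname[len - 1],
                 hash_default ? "/native_hash" : "/native");
    }

    int main(void)
    {
        char buf[64];

        build_hv_name("xgbe0", 1, buf, sizeof(buf));
        printf("%s\n", buf); /* xgbe/0/native_hash */
        return 0;
    }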
+
+static struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address
+};
+
+
+/*
+ * The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ PDEBUG("tile_net_setup()\n");
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &tile_net_ops;
+
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+
+ /* We want lockless xmit. */
+ dev->features |= NETIF_F_LLTX;
+
+ /* We support hardware tx checksums. */
+ dev->features |= NETIF_F_HW_CSUM;
+
+ /* We support scatter/gather. */
+ dev->features |= NETIF_F_SG;
+
+ /* We support TSO. */
+ dev->features |= NETIF_F_TSO;
+
+#ifdef TILE_NET_GSO
+ /* We support GSO. */
+ dev->features |= NETIF_F_GSO;
+#endif
+
+ if (hash_default)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* ISSUE: We should support NETIF_F_UFO. */
+
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+
+ dev->mtu = TILE_NET_MTU;
+}
+
+
+/*
+ * Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static struct net_device *tile_net_dev_init(const char *name)
+{
+ int ret;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+ struct page *page;
+
+ /*
+ * Allocate the device structure. This allocates "priv", calls
+ * tile_net_setup(), and saves "name". Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
+ if (!dev) {
+ pr_err("alloc_netdev(%s) failed\n", name);
+ return NULL;
+ }
+
+ priv = netdev_priv(dev);
+
+ /* Initialize "priv". */
+
+ memset(priv, 0, sizeof(*priv));
+
+ /* Save "dev" for "tile_net_open_retry()". */
+ priv->dev = dev;
+
+ INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
+
+ spin_lock_init(&priv->cmd_lock);
+ spin_lock_init(&priv->comp_lock);
+
+ /* Allocate "epp_queue". */
+ BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page) {
+ free_netdev(dev);
+ return NULL;
+ }
+ priv->epp_queue = page_address(page);
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("register_netdev %s failed %d\n", dev->name, ret);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ /* Get the MAC address. */
+ ret = tile_net_get_mac(dev);
+ if (ret < 0) {
+ unregister_netdev(dev);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+
+/*
+ * Module cleanup.
+ */
+static void tile_net_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < TILE_NET_DEVS; i++) {
+ if (tile_net_devs[i]) {
+ struct net_device *dev = tile_net_devs[i];
+ struct tile_net_priv *priv = netdev_priv(dev);
+ unregister_netdev(dev);
+ finv_buffer(priv->epp_queue, PAGE_SIZE);
+ free_page((unsigned long)priv->epp_queue);
+ free_netdev(dev);
+ }
+ }
+}
+
+
+/*
+ * Module initialization.
+ */
+static int tile_net_init_module(void)
+{
+ pr_info("Tilera IPP Net Driver\n");
+
+ tile_net_devs[0] = tile_net_dev_init("xgbe0");
+ tile_net_devs[1] = tile_net_dev_init("xgbe1");
+ tile_net_devs[2] = tile_net_dev_init("gbe0");
+ tile_net_devs[3] = tile_net_dev_init("gbe1");
+
+ return 0;
+}
+
+
+#ifndef MODULE
+/*
+ * The "network_cpus" boot argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "network_cpus=m-n[,x-y]", where
+ * m-n and x-y are ranges of cpu numbers. The listed cpus must not
+ * already be dedicated cpus or dataplane cpus.
+ */
+static int __init network_cpus_setup(char *str)
+{
+ int rc = cpulist_parse_crop(str, &network_cpus_map);
+ if (rc != 0) {
+ pr_warning("network_cpus=%s: malformed cpu list\n",
+ str);
+ } else {
+
+ /* Restrict the specified cpus to those that are possible. */
+ cpumask_and(&network_cpus_map, &network_cpus_map,
+ cpu_possible_mask);
+
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warning("Ignoring network_cpus='%s'.\n",
+ str);
+ } else {
+ char buf[1024];
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ network_cpus_used = true;
+ }
+ }
+
+ return 0;
+}
+__setup("network_cpus=", network_cpus_setup);
+#endif
+
+
+module_init(tile_net_init_module);
+module_exit(tile_net_cleanup);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..c78a50586c1d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d1a269..7064e035757a 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
DMFE_DBUG(0, "dmfe_start_xmit", 0);
- /* Resource flag check */
- netif_stop_queue(dev);
-
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
spin_lock_irqsave(&db->lock, flags);
/* No Tx resource check, it never happen nromally */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
ugeth_vdbg("%s: IN", __func__);
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- /* Tell the kernel the link is down */
- phy_stop(phydev);
-
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
ucc_geth_memclean(ugeth);
}
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
napi_disable(&ugeth->napi);
+ cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
- ucc_geth_close(dev);
- ucc_geth_open(dev);
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- netif_carrier_off(dev);
schedule_work(&ugeth->timeout_work);
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 512
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..812edf85d6d3 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -958,10 +958,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
/* Packet is complete. Inject into stack. */
/* We have IP packet here */
odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
- /* don't check it */
- odev->skb_rx_buf->ip_summed =
- CHECKSUM_UNNECESSARY;
-
skb_reset_mac_header(odev->skb_rx_buf);
/* Ship it off to the kernel */
@@ -2994,12 +2990,14 @@ static int hso_probe(struct usb_interface *interface,
case HSO_INTF_BULK:
/* It's a regular bulk interface */
- if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
- !disable_net)
- hso_dev = hso_create_net_device(interface, port_spec);
- else
+ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+ if (!disable_net)
+ hso_dev =
+ hso_create_net_device(interface, port_spec);
+ } else {
hso_dev =
hso_create_bulk_serial_device(interface, port_spec);
+ }
if (!hso_dev)
goto exit;
break;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..c04d49e31f81 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+ /* usbnet already took usb runtime pm, so have to enable the feature
+ * for usb interface, otherwise usb_autopm_get_interface may return
+ * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+ */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
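
The virtio_net hunk above only assumes VIRTIO_NET_S_LINK_UP when the device does not advertise VIRTIO_NET_F_STATUS; otherwise it starts with the carrier off and lets the config-space status decide. A hedged sketch of that feature-gated setup; read_link_status() is a placeholder for the driver's config-read routine, not an existing function.

#include <linux/netdevice.h>
#include <linux/virtio_config.h>

static void init_carrier(struct virtio_device *vdev, struct net_device *dev)
{
	if (virtio_has_feature(vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);		/* start down, let config decide */
		read_link_status(vdev, dev);	/* placeholder: read status, set carrier */
	} else {
		netif_carrier_on(dev);		/* no status feature: assume link up */
	}
}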
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e3658e10db39..21314e06e6d7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -873,7 +873,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
- ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+ ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 8a2f4712284c..edf228843afc 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
+ writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
+ readl((adapter)->hw_addr0 + (reg))
#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
+ writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
+ readl((adapter)->hw_addr1 + (reg))
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
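
The vmxnet3 accessor change above drops the explicit cpu_to_le32()/le32_to_cpu() because writel()/readl() already perform the CPU-to-little-endian conversion for MMIO; keeping both swaps the bytes twice on big-endian hosts and is a no-op on little-endian ones. A small sketch of the corrected accessor pattern, with illustrative names:

#include <linux/io.h>
#include <linux/types.h>

/* readl()/writel() handle the LE conversion, so the value goes through
 * untouched; an extra cpu_to_le32() would double-swap on big-endian CPUs. */
static inline void bar_write(void __iomem *base, unsigned long reg, u32 val)
{
	writel(val, base + reg);
}

static inline u32 bar_read(void __iomem *base, unsigned long reg)
{
	return readl(base + reg);
}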
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index ea476cbd38b5..e305274f83fb 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -293,6 +293,7 @@ static inline void sca_tx_done(port_t *port)
struct net_device *dev = port->netdev;
card_t* card = port->card;
u8 stat;
+ unsigned count = 0;
spin_lock(&port->lock);
@@ -316,10 +317,12 @@ static inline void sca_tx_done(port_t *port)
dev->stats.tx_bytes += readw(&desc->len);
}
writeb(0, &desc->stat); /* Free descriptor */
+ count++;
port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
}
- netif_wake_queue(dev);
+ if (count)
+ netif_wake_queue(dev);
spin_unlock(&port->lock);
}
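
The hd64572 hunk above counts how many TX descriptors were actually released and calls netif_wake_queue() only when at least one was freed, so the stack is not woken while the ring is still full. A minimal sketch of the "wake only on progress" pattern; struct ring and the tx_done()/release_desc() helpers are hypothetical stand-ins for the driver's own descriptor handling.

#include <linux/netdevice.h>

struct ring { unsigned int tail, size; };

static void example_tx_done(struct net_device *dev, struct ring *r)
{
	unsigned int freed = 0;

	while (tx_done(r, r->tail)) {		/* hypothetical completion check */
		release_desc(r, r->tail);	/* hypothetical descriptor free */
		r->tail = (r->tail + 1) % r->size;
		freed++;
	}

	if (freed)				/* only wake if ring space was reclaimed */
		netif_wake_queue(dev);
}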
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
static int x25_asy_close(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
- int err;
spin_lock(&sl->lock);
if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
- err);
spin_unlock(&sl->lock);
return 0;
}
@@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
if (err)
return err;
/* Done. We have linked the TTY line to a channel. */
- return sl->dev->base_addr;
+ return 0;
}
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
static void x25_asy_close_tty(struct tty_struct *tty)
{
struct x25_asy *sl = tty->disc_data;
+ int err;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
dev_close(sl->dev);
rtnl_unlock();
+ err = lapb_unregister(sl->dev);
+ if (err != LAPB_OK)
+ printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+ err);
+
tty->disc_data = NULL;
sl->tty = NULL;
x25_asy_free(sl);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8251946842e6..42ed923cdb1a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1917,7 +1917,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
sc->bmisscount = 0;
}
- if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
+ if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
@@ -1949,8 +1950,9 @@ ath5k_beacon_send(struct ath5k_softc *sc)
/* NB: hw still stops DMA, so proceed */
}
- /* refresh the beacon for AP mode */
- if (sc->opmode == NL80211_IFTYPE_AP)
+ /* refresh the beacon for AP or MESH mode */
+ if (sc->opmode == NL80211_IFTYPE_AP ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_beacon_update(sc->hw, vif);
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
@@ -2851,7 +2853,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
/* Assign the vap/adhoc to a beacon xmit slot. */
if ((avf->opmode == NL80211_IFTYPE_AP) ||
- (avf->opmode == NL80211_IFTYPE_ADHOC)) {
+ (avf->opmode == NL80211_IFTYPE_ADHOC) ||
+ (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
int slot;
WARN_ON(list_empty(&sc->bcbuf));
@@ -2870,7 +2873,7 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
sc->bslot[avf->bslot] = vif;
if (avf->opmode == NL80211_IFTYPE_AP)
sc->num_ap_vifs++;
- else
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
sc->num_adhoc_vifs++;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a0471f2e1c7a..48261b7252d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
}
+ if (AR_SREV_9280(ah))
+ val |= AR_WA_BIT22;
+
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c4182359bee4..a7b82f0085d2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -55,6 +55,8 @@
#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
+
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
@@ -290,20 +292,21 @@ static const struct ar9300_eeprom ar9300_default = {
}
},
.ctlPowerData_2G = {
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
},
.modalHeader5G = {
/* 4 idle,t1,t2,b (4 bits per setting) */
@@ -568,56 +571,56 @@ static const struct ar9300_eeprom ar9300_default = {
.ctlPowerData_5G = {
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 0}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 0}, {60, 1}, {60, 1}, {60, 0},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
- {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 0}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
}
},
}
@@ -1827,9 +1830,9 @@ static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
if (is2GHz)
- return ctl_2g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
else
- return ctl_5g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
}
static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
@@ -1847,12 +1850,12 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
if (is2GHz) {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
- ctl_2g[idx].ctlEdges[edge - 1].flag)
- return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
} else {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
- ctl_5g[idx].ctlEdges[edge - 1].flag)
- return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
}
return AR9300_MAX_RATE_POWER;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3c533bb983c7..655b3033396c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -261,17 +261,12 @@ struct cal_tgt_pow_ht {
u8 tPow2x[14];
} __packed;
-struct cal_ctl_edge_pwr {
- u8 tPower:6,
- flag:2;
-} __packed;
-
struct cal_ctl_data_2g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
} __packed;
struct cal_ctl_data_5g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
struct ar9300_eeprom {
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 9b8e7e3fcebd..0963071e8f90 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,6 +21,7 @@
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
+#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
@@ -328,7 +329,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
struct ath_txq *txq, bool retry_tx);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
@@ -646,6 +647,8 @@ struct ath_softc {
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
+
+ struct pm_qos_request_list pm_qos_req;
};
struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 1266333f586d..2bbf94d0191e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -240,16 +240,16 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
for (i = 0; (i < num_band_edges) &&
(pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
- twiceMaxEdgePower = pRdEdgesPower[i].tPower;
+ twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
break;
} else if ((i > 0) &&
(freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
is2GHz))) {
if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
is2GHz) < freq &&
- pRdEdgesPower[i - 1].flag) {
+ CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
twiceMaxEdgePower =
- pRdEdgesPower[i - 1].tPower;
+ CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
}
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index dacb45e1b906..dd59f09441a3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -233,6 +233,18 @@
#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
+#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+
+#define LNA_CTL_BUF_MODE BIT(0)
+#define LNA_CTL_ISEL_LO BIT(1)
+#define LNA_CTL_ISEL_HI BIT(2)
+#define LNA_CTL_BUF_IN BIT(3)
+#define LNA_CTL_FEM_BAND BIT(4)
+#define LNA_CTL_LOCAL_BIAS BIT(5)
+#define LNA_CTL_FORCE_XPA BIT(6)
+#define LNA_CTL_USE_ANT1 BIT(7)
+
enum eeprom_param {
EEP_NFTHRESH_5,
EEP_NFTHRESH_2,
@@ -378,10 +390,7 @@ struct modal_eep_header {
u8 xatten2Margin[AR5416_MAX_CHAINS];
u8 ob_ch1;
u8 db_ch1;
- u8 useAnt1:1,
- force_xpaon:1,
- local_bias:1,
- femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
+ u8 lna_ctl;
u8 miscBits;
u16 xpaBiasLvlFreq[3];
u8 futureModal[6];
@@ -535,18 +544,10 @@ struct cal_target_power_ht {
u8 tPow2x[8];
} __packed;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-struct cal_ctl_edges {
- u8 bChannel;
- u8 flag:2, tPower:6;
-} __packed;
-#else
struct cal_ctl_edges {
u8 bChannel;
- u8 tPower:6, flag:2;
+ u8 ctl;
} __packed;
-#endif
struct cal_data_op_loop_ar9287 {
u8 pwrPdg[2][5];
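
The eeprom header changes above collapse the CTL edge bitfield struct into a plain u8: CTL(_tpower, _flag) packs the 6-bit transmit power into bits 0-5 and the 2-bit flag into bits 6-7, and CTL_EDGE_TPOWER()/CTL_EDGE_FLAGS() unpack them again. This avoids depending on compiler-defined bitfield layout, which is why the old header needed separate big- and little-endian variants. A tiny worked example of the packing, using the macros exactly as defined in the patch:

#define CTL(_tpower, _flag)   ((_tpower) | ((_flag) << 6))
#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
#define CTL_EDGE_FLAGS(_ctl)  (((_ctl) >> 6) & 0x03)

/* CTL(60, 1) == 60 | (1 << 6) == 0x7c, and unpacking gives:
 *   CTL_EDGE_TPOWER(0x7c) == 60
 *   CTL_EDGE_FLAGS(0x7c)  == 1
 */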
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 966b9496a9dd..195406db3bd8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
int addr, eep_start_loc;
eep_data = (u16 *)eep;
- if (ah->hw_version.devid == 0x7015)
+ if (AR9287_HTC_DEVID(ah))
eep_start_loc = AR9287_HTC_EEP_START_LOC;
else
eep_start_loc = AR9287_EEP_START_LOC;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 76b4d65472dd..a3ccb1b9638d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -451,9 +451,10 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
AR_AN_TOP2_LOCALBIAS,
AR_AN_TOP2_LOCALBIAS_S,
- pModal->local_bias);
+ !!(pModal->lna_ctl &
+ LNA_CTL_LOCAL_BIAS));
REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
- pModal->force_xpaon);
+ !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
}
REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
@@ -1062,15 +1063,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
- scaledPower = max((u16)0, scaledPower);
-
if (IS_CHAN_2GHZ(chan)) {
numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
SUB_NUM_CTL_MODES_AT_2G_40;
@@ -1428,9 +1433,9 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
num_ant_config = 1;
- if (pBase->version >= 0x0E0D)
- if (pModal->useAnt1)
- num_ant_config += 1;
+ if (pBase->version >= 0x0E0D &&
+ (pModal->lna_ctl & LNA_CTL_USE_ANT1))
+ num_ant_config += 1;
return num_ant_config;
}
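
In the power-per-rate hunk above, scaledPower is an unsigned 16-bit value, so the old sequence "scaledPower -= reduction; scaledPower = max((u16)0, scaledPower);" could not catch underflow: the subtraction wraps first and max() of two unsigned values is then a no-op. The fix compares before subtracting. A minimal sketch of the clamped subtraction:

#include <linux/types.h>

/* Subtract a per-chain reduction from an unsigned power value without
 * wrapping; clamping after an unsigned subtraction is too late. */
static u16 reduce_power(u16 power, u16 reduction)
{
	return (power > reduction) ? power - reduction : 0;
}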
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6576f683dba0..0de3c3d3c245 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -35,8 +35,14 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
+ { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
+ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
+ { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
{ },
};
@@ -540,11 +546,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
- usb_fill_int_urb(urb, hif_dev->udev,
+ usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb, 1);
+ ath9k_hif_usb_reg_in_cb, nskb);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -720,11 +726,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
if (!skb)
goto err;
- usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb, 1);
+ ath9k_hif_usb_reg_in_cb, skb);
if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
goto err;
@@ -805,6 +811,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
firm_offset = AR7010_FIRMWARE_TEXT;
break;
default:
@@ -843,14 +851,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
goto err_fw_req;
}
- /* Alloc URBs */
- ret = ath9k_hif_usb_alloc_urbs(hif_dev);
- if (ret) {
- dev_err(&hif_dev->udev->dev,
- "ath9k_htc: Unable to allocate URBs\n");
- goto err_urb;
- }
-
/* Download firmware */
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
@@ -866,16 +866,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
*/
for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
endp = &alt->endpoint[idx].desc;
- if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
- == 0x04) &&
- ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT)) {
+ if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
endp->bInterval = 0;
}
}
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
return 0;
err_fw_download:
@@ -929,6 +935,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
@@ -1016,6 +1024,13 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
struct hif_device_usb *hif_dev =
(struct hif_device_usb *) usb_get_intfdata(interface);
+ /*
+ * The device has to be set to FULLSLEEP mode in case no
+ * interface is up.
+ */
+ if (!(hif_dev->flags & HIF_USB_START))
+ ath9k_htc_suspend(hif_dev->htc_handle);
+
ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
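
The hif_usb hunks above rewrite the device's interrupt endpoint descriptor to bulk before the URBs are allocated, and switch the register-in path from usb_fill_int_urb() to usb_fill_bulk_urb(), which takes no polling-interval argument. A short sketch of submitting such a bulk-IN URB; REG_IN_PIPE, buf, len, ctx and reg_in_complete are illustrative names rather than symbols from this driver.

#include <linux/usb.h>

/* Fill and submit a bulk-IN URB for what used to be an interrupt endpoint. */
usb_fill_bulk_urb(urb, udev,
		  usb_rcvbulkpipe(udev, REG_IN_PIPE),
		  buf, len, reg_in_complete, ctx);
if (usb_submit_urb(urb, GFP_KERNEL))
	goto err;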
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 75ecf6a30d25..c3b561daa6c1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -455,6 +455,8 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
@@ -464,6 +466,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product);
void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
#ifdef CONFIG_PM
+void ath9k_htc_suspend(struct htc_target *htc_handle);
int ath9k_htc_resume(struct htc_target *htc_handle);
#endif
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 3d7b97f1b3ae..8776f49ffd41 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -249,6 +249,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
case 0x7010:
case 0x7015:
case 0x9018:
+ case 0xA704:
+ case 0x1200:
priv->htc->credits = 45;
break;
default:
@@ -889,6 +891,12 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
}
#ifdef CONFIG_PM
+
+void ath9k_htc_suspend(struct htc_target *htc_handle)
+{
+ ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP);
+}
+
int ath9k_htc_resume(struct htc_target *htc_handle)
{
int ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a3be8da755d..51977caca47f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -63,8 +63,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
return mode;
}
-static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
- enum ath9k_power_mode mode)
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
{
bool ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3d19b5bc937f..29d80ca78393 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
tx_hdr.data_type = ATH9K_HTC_NORMAL;
}
- if (ieee80211_is_data(fc)) {
+ if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cc13ee117823..c7fbe25cc128 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
"Failed allocating banks for "
"external radio\n");
+ ath9k_hw_rf_free_ext_banks(ah);
return ecode;
}
@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
break;
+ default:
+ if (ah->is_monitoring)
+ REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ break;
}
}
@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
+ if (ah->is_monitoring) {
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
+ TU_TO_USEC(next_beacon));
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
+ REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
+ flags |= AR_TBTT_TIMER_EN;
+ break;
+ }
ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
"%s: unsupported opmode: %d\n",
__func__, ah->opmode);
@@ -2033,7 +2044,8 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
val = REG_READ(ah, AR7010_GPIO_IN);
return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
} else if (AR_SREV_9300_20_OR_LATER(ah))
- return MS_REG_READ(AR9300, gpio) != 0;
+ return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
else if (AR_SREV_9287_11_OR_LATER(ah))
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d032939768b0..d47d1b4b6002 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -622,6 +622,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool is_monitoring;
bool need_an_top2_fixup;
u16 tx_trig_level;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 95b41db0d86b..14b8ab386daf 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -661,6 +661,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_STATION) |
@@ -756,6 +758,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
+ pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
return 0;
error_world:
@@ -824,6 +829,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
}
ieee80211_unregister_hw(hw);
+ pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 8c13479b17cd..c996963ab339 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -703,8 +703,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
- else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
- rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
else if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b52f1cf8a603..c0c3464d3a86 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -93,11 +93,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
unsigned long flags;
+ enum ath9k_power_mode power_mode;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (++sc->ps_usecount != 1)
goto unlock;
+ power_mode = sc->sc_ah->power_mode;
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
/*
@@ -105,10 +107,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
* useful data. Better clear them now so that they don't mess up
* survey data results.
*/
- spin_lock(&common->cc_lock);
- ath_hw_cycle_counters_update(common);
- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
- spin_unlock(&common->cc_lock);
+ if (power_mode != ATH9K_PM_AWAKE) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ spin_unlock(&common->cc_lock);
+ }
unlock:
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -240,11 +244,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* the relevant bits of the h/w.
*/
ath9k_hw_set_interrupts(ah, 0);
- ath_drain_all_txq(sc, false);
+ stopped = ath_drain_all_txq(sc, false);
spin_lock_bh(&sc->rx.pcu_lock);
- stopped = ath_stoprecv(sc);
+ if (!ath_stoprecv(sc))
+ stopped = false;
/* XXX: do not flush receive queue here. We don't want
* to flush data frames already in queue because of
@@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ah->imask |= ATH9K_INT_CST;
sc->sc_flags &= ~SC_OP_INVALID;
+ sc->sc_ah->is_monitoring = false;
/* Disable BMISS interrupt when we're not associated */
ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
@@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
+ pm_qos_update_request(&sc->pm_qos_req, 55);
+
mutex_unlock:
mutex_unlock(&sc->mutex);
@@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
sc->sc_flags |= SC_OP_INVALID;
+ pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
mutex_unlock(&sc->mutex);
ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
@@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MONITOR) {
+ vif->type == NL80211_IFTYPE_ADHOC) {
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
}
@@ -1511,7 +1520,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
- int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -1525,21 +1533,24 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ /* Disable SWBA interrupt */
+ sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
ath9k_ps_restore(sc);
+ tasklet_kill(&sc->bcon_tasklet);
}
ath_beacon_return(sc, avp);
sc->sc_flags &= ~SC_OP_BEACONS;
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == vif) {
- printk(KERN_DEBUG "%s: vif had allocated beacon "
- "slot\n", __func__);
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
+ if (sc->nbcnvifs) {
+ /* Re-enable SWBA interrupt */
+ sc->sc_ah->imask |= ATH9K_INT_SWBA;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+ ath9k_ps_restore(sc);
}
sc->nvifs--;
@@ -1644,8 +1655,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
ath_print(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ "Monitor mode is enabled\n");
+ sc->sc_ah->is_monitoring = true;
+ } else {
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor mode is disabled\n");
+ sc->sc_ah->is_monitoring = false;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fddb0129bb57..fdc2ec52b42f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
*/
if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
(sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
+ (sc->sc_ah->is_monitoring))
rfilt |= ATH9K_RX_FILTER_PROM;
if (sc->rx.rxfilter & FIF_CONTROL)
@@ -518,7 +518,7 @@ bool ath_stoprecv(struct ath_softc *sc)
bool stopped;
spin_lock_bh(&sc->rx.rxbuflock);
- ath9k_hw_stoppcurecv(ah);
+ ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
@@ -838,6 +838,10 @@ static bool ath9k_rx_accept(struct ath_common *common,
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
+#define is_mc_or_valid_tkip_keyix ((is_mc || \
+ (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
+
struct ath_hw *ah = common->ah;
__le16 fc;
u8 rx_status_len = ah->caps.rx_status_len;
@@ -879,15 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+ bool is_mc;
/*
* The MIC error bit is only valid if the frame
* is not a control frame or fragment, and it was
* decrypted using a valid TKIP key.
*/
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+
if (!ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap))
+ is_mc_or_valid_tkip_keyix)
rxs->flag |= RX_FLAG_MMIC_ERROR;
else
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
@@ -897,7 +904,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
- if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ah->is_monitoring) {
if (rx_stats->rs_status &
~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
ATH9K_RXERR_CRC))
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 42976b0a01c1..2c6a22fbb0f0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -703,6 +703,7 @@
#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
#define AR_WA_ANALOG_SHIFT (1 << 20)
#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
+#define AR_WA_BIT22 (1 << 22)
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -865,7 +866,13 @@
#define AR_DEVID_7010(_ah) \
(((_ah)->hw_version.devid == 0x7010) || \
((_ah)->hw_version.devid == 0x7015) || \
- ((_ah)->hw_version.devid == 0x9018))
+ ((_ah)->hw_version.devid == 0x9018) || \
+ ((_ah)->hw_version.devid == 0xA704) || \
+ ((_ah)->hw_version.devid == 0x1200))
+
+#define AR9287_HTC_DEVID(_ah) \
+ (((_ah)->hw_version.devid == 0x7015) || \
+ ((_ah)->hw_version.devid == 0x1200))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -977,11 +984,13 @@ enum {
#define AR9287_GPIO_IN_VAL_S 11
#define AR9271_GPIO_IN_VAL 0xFFFF0000
#define AR9271_GPIO_IN_VAL_S 16
-#define AR9300_GPIO_IN_VAL 0x0001FFFF
-#define AR9300_GPIO_IN_VAL_S 0
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
+#define AR_GPIO_IN 0x404c
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
+
#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f2ade2402ce2..aff04789f794 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1120,7 +1120,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
}
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1128,7 +1128,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
int i, npend = 0;
if (sc->sc_flags & SC_OP_INVALID)
- return;
+ return true;
/* Stop beacon queue */
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -1142,25 +1142,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
}
}
- if (npend) {
- int r;
-
- ath_print(common, ATH_DBG_FATAL,
- "Failed to stop TX DMA. Resetting hardware!\n");
-
- spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
- if (r)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d\n",
- r);
- spin_unlock_bh(&sc->sc_resetlock);
- }
+ if (npend)
+ ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n");
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i))
ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
}
+
+ return !npend;
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index ae6c006bbc56..546b4e4ec5ea 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -291,7 +291,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
ar->hw->wiphy->interface_modes |=
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO);
}
}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 980ae70ea424..dc7b30b170d0 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -647,7 +647,7 @@ init:
}
unlock:
- if (err && (vif_id != -1)) {
+ if (err && (vif_id >= 0)) {
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
@@ -1631,7 +1631,8 @@ void *carl9170_alloc(size_t priv_size)
* supports these modes. The code which will add the
* additional interface_modes is in fw.c.
*/
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index b575c865142d..7e6506a77bbb 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -810,7 +810,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
AR9170_TX_MAC_BACKOFF);
- mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &&
+ mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
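
The one-character carl9170 fix above replaces a logical AND (&&) with a bitwise AND (&): the shifted queue number has to be masked against AR9170_TX_MAC_QOS, whereas "a && b" collapses to 0 or 1 and would put the wrong bits in the descriptor. A tiny illustration of the difference; the shift and mask values below are made up for the example, not the carl9170 register layout.

unsigned int shift = 10, mask = 0x3 << 10, queue = 2;

unsigned int wrong = (queue << shift) && mask;	/* == 1, just a boolean   */
unsigned int right = (queue << shift) &  mask;	/* == 0x800, the bit field */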
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index d8607f4c144d..7504ed14c725 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* D-Link DWA 130 D */
+ { USB_DEVICE(0x07d1, 0x3a0f) },
/* Netgear WNA1000 */
{ USB_DEVICE(0x0846, 0x9040) },
- /* Netgear WNDA3100 */
+ /* Netgear WNDA3100 (v1) */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
@@ -551,12 +553,12 @@ static int carl9170_usb_flush(struct ar9170 *ar)
usb_free_urb(urb);
}
- ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ);
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
if (ret == 0)
err = -ETIMEDOUT;
/* lets wait a while until the tx - queues are dried out */
- ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ);
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
if (ret == 0)
err = -ETIMEDOUT;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 9a55338d957f..09e2dfd7b175 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func,
err_free_ssb:
kfree(sdio);
err_disable_func:
+ sdio_claim_host(func);
sdio_disable_func(func);
err_release_host:
sdio_release_host(func);
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 32dee2ce5d31..d5ef696298ee 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -54,6 +54,7 @@
#define DRV_DESCRIPTION "802.11 data/management/control stack"
#define DRV_NAME "libipw"
+#define DRV_PROCNAME "ieee80211"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
@@ -293,16 +294,16 @@ static int __init libipw_init(void)
struct proc_dir_entry *e;
libipw_debug_level = debug;
- libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
+ libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
if (libipw_proc == NULL) {
- LIBIPW_ERROR("Unable to create " DRV_NAME
+ LIBIPW_ERROR("Unable to create " DRV_PROCNAME
" proc directory\n");
return -EIO;
}
e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
&debug_level_proc_fops);
if (!e) {
- remove_proc_entry(DRV_NAME, init_net.proc_net);
+ remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
return -EIO;
}
@@ -319,7 +320,7 @@ static void __exit libipw_exit(void)
#ifdef CONFIG_LIBIPW_DEBUG
if (libipw_proc) {
remove_proc_entry("debug_level", libipw_proc);
- remove_proc_entry(DRV_NAME, init_net.proc_net);
+ remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
}
#endif /* CONFIG_LIBIPW_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 8f8c4b73f8b9..7edf8c2fb8c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* "the hard way", rather than using device's scan.
*/
if (iwl3945_mod_params.disable_hw_scan) {
- IWL_ERR(priv, "sw scan support is deprecated\n");
+ dev_printk(KERN_DEBUG, &(pdev->dev),
+ "sw scan support is deprecated\n");
iwl3945_hw_ops.hw_scan = NULL;
}
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5046a0005034..373930afc26b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work)
if (priv->scan_channel < priv->scan_req->n_channels) {
cancel_delayed_work(&priv->scan_work);
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(300));
+ if (!priv->stopping)
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(300));
}
/* This is the final data we are about to send */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f062ed583901..cb14c38caf3a 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@ struct lbs_private {
/* CFG80211 */
struct wireless_dev *wdev;
bool wiphy_registered;
+ bool stopping;
struct cfg80211_scan_request *scan_req;
u8 assoc_bss[ETH_ALEN];
u8 disassoc_reason;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index e5685dc317a8..b4de0ca10feb 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1170,7 +1170,6 @@ static void if_sdio_remove(struct sdio_func *func)
lbs_deb_sdio("call remove card\n");
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
- card->priv->surpriseremoved = 1;
flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 79bcb4e5d2ca..ecd4d04b2c3c 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1055,7 +1055,6 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
lbs_stop_card(priv);
lbs_remove_card(priv); /* will call free_netdev */
- priv->surpriseremoved = 1;
free_irq(spi->irq, card);
if_spi_terminate_spi_thread(card);
if (card->pdata->teardown)
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 47ce5a6ba120..fcd1bbfc632d 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
spin_lock_irq(&priv->driver_lock);
+ priv->stopping = false;
if (priv->connect_status == LBS_CONNECTED)
netif_carrier_on(dev);
@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
spin_lock_irq(&priv->driver_lock);
+ priv->stopping = true;
netif_stop_queue(dev);
spin_unlock_irq(&priv->driver_lock);
schedule_work(&priv->mcast_work);
+ cancel_delayed_work_sync(&priv->scan_work);
+ if (priv->scan_req) {
+ cfg80211_scan_done(priv->scan_req, false);
+ priv->scan_req = NULL;
+ }
lbs_deb_leave(LBS_DEB_NET);
return 0;
@@ -908,8 +915,6 @@ void lbs_remove_card(struct lbs_private *priv)
lbs_free_adapter(priv);
lbs_cfg_free(priv);
-
- priv->dev = NULL;
free_netdev(dev);
lbs_deb_leave(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index e8e2d0f4763d..f3d396e7544b 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1392,10 +1392,9 @@ static void orinoco_process_scan_results(struct work_struct *work)
orinoco_add_hostscan_results(priv, buf, len);
kfree(buf);
- } else if (priv->scan_request) {
+ } else {
/* Either abort or complete the scan */
- cfg80211_scan_done(priv->scan_request, (len < 0));
- priv->scan_request = NULL;
+ orinoco_scan_done(priv, (len < 0));
}
spin_lock_irqsave(&priv->scan_lock, flags);
@@ -1684,6 +1683,8 @@ static int __orinoco_down(struct orinoco_private *priv)
hermes_write_regn(hw, EVACK, 0xffff);
}
+ orinoco_scan_done(priv, true);
+
/* firmware will have to reassociate */
netif_carrier_off(dev);
priv->last_linkstatus = 0xffff;
@@ -1762,10 +1763,7 @@ void orinoco_reset(struct work_struct *work)
orinoco_unlock(priv, &flags);
/* Scanning support: Notify scan cancellation */
- if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- }
+ orinoco_scan_done(priv, true);
if (priv->hard_reset) {
err = (*priv->hard_reset)(priv);
@@ -1813,6 +1811,12 @@ static int __orinoco_commit(struct orinoco_private *priv)
struct net_device *dev = priv->ndev;
int err = 0;
+ /* If we've called commit, we are reconfiguring or bringing the
+ * interface up. Maintaining countermeasures across this would
+ * be confusing, so note that we've disabled them. The port will
+ * be enabled later in orinoco_commit or __orinoco_up. */
+ priv->tkip_cm_active = 0;
+
err = orinoco_hw_program_rids(priv);
/* FIXME: what about netif_tx_lock */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 71b3d68b9403..32954c4b243a 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -151,20 +151,20 @@ orinoco_cs_config(struct pcmcia_device *link)
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index a38a7bd25f19..b9aedf18a046 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -57,7 +57,6 @@
#include <linux/fcntl.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/smp_lock.h>
#include <linux/usb.h>
#include <linux/timer.h>
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 4300d9db7d8c..86cb54c842e7 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -229,3 +229,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
priv->scan_request = NULL;
}
}
+
+void orinoco_scan_done(struct orinoco_private *priv, bool abort)
+{
+ if (priv->scan_request) {
+ cfg80211_scan_done(priv->scan_request, abort);
+ priv->scan_request = NULL;
+ }
+}
diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/orinoco/scan.h
index 2dc4e046dbdb..27281fb0a6dc 100644
--- a/drivers/net/wireless/orinoco/scan.h
+++ b/drivers/net/wireless/orinoco/scan.h
@@ -16,5 +16,6 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
void orinoco_add_hostscan_results(struct orinoco_private *dev,
unsigned char *buf,
size_t len);
+void orinoco_scan_done(struct orinoco_private *priv, bool abort);
#endif /* _ORINOCO_SCAN_H_ */
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index fb859a5ad2eb..db34c282e59b 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -214,21 +214,21 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
hw->eeprom_pda = true;
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 93505f93bf97..e5afabee60d1 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -911,10 +911,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
*/
if (param->value) {
priv->tkip_cm_active = 1;
- ret = hermes_enable_port(hw, 0);
+ ret = hermes_disable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
- ret = hermes_disable_port(hw, 0);
+ ret = hermes_enable_port(hw, 0);
}
break;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2f502b..4396d4b9bfb9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
boolean
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
- depends on RT2X00_LIB=y && LEDS_CLASS=m
-
config RT2X00_LIB_DEBUGFS
bool "Ralink debugfs support"
depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 458bb57914a3..cdbeec9f83ea 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,8 +66,8 @@ struct netfront_cb {
#define GRANT_INVALID_REF 0
-#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
-#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
struct netfront_info {