author     Paul Mackerras <paulus@samba.org>   2006-08-01 10:37:25 +1000
committer  Paul Mackerras <paulus@samba.org>   2006-08-01 10:37:25 +1000
commit     57cad8084e0837e0f2c97da789ec9b3f36809be9 (patch)
tree       e9c790afb4286f78cb08d9664f58baa7e876fe55 /drivers/net
parent     cb18bd40030c879cd93fef02fd579f74dbab473d (diff)
parent     49b1e3ea19b1c95c2f012b8331ffb3b169e4c042 (diff)
Merge branch 'merge'
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 166
-rw-r--r--  drivers/net/8139cp.c | 2
-rw-r--r--  drivers/net/8139too.c | 5
-rw-r--r--  drivers/net/bnx2.c | 2
-rw-r--r--  drivers/net/chelsio/sge.c | 2
-rw-r--r--  drivers/net/dummy.c | 1
-rw-r--r--  drivers/net/e1000/e1000.h | 13
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 141
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1772
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 398
-rw-r--r--  drivers/net/e1000/e1000_main.c | 435
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 13
-rw-r--r--  drivers/net/e1000/e1000_param.c | 199
-rw-r--r--  drivers/net/forcedeth.c | 51
-rw-r--r--  drivers/net/hamradio/bpqether.c | 7
-rw-r--r--  drivers/net/ifb.c | 1
-rw-r--r--  drivers/net/irda/ali-ircc.c | 3
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/loopback.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 17
-rw-r--r--  drivers/net/s2io.c | 285
-rw-r--r--  drivers/net/s2io.h | 5
-rw-r--r--  drivers/net/sk98lin/h/xmac_ii.h | 2
-rw-r--r--  drivers/net/skge.c | 5
-rw-r--r--  drivers/net/skge.h | 4
-rw-r--r--  drivers/net/sky2.c | 52
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/smc91x.h | 18
-rw-r--r--  drivers/net/spider_net.c | 580
-rw-r--r--  drivers/net/spider_net.h | 73
-rw-r--r--  drivers/net/sunhme.c | 9
-rw-r--r--  drivers/net/sunlance.c | 8
-rw-r--r--  drivers/net/tg3.c | 118
-rw-r--r--  drivers/net/typhoon.c | 4
-rw-r--r--  drivers/net/via-velocity.c | 17
-rw-r--r--  drivers/net/wan/c101.c | 30
-rw-r--r--  drivers/net/wan/hd6457x.c | 26
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 14
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 14
-rw-r--r--  drivers/net/wan/hdlc_generic.c | 65
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 1
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 3
-rw-r--r--  drivers/net/wan/wanxl.c | 5
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 4
-rw-r--r--  drivers/net/wireless/orinoco.c | 4
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 37
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 12
53 files changed, 3325 insertions, 1316 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 2819de79442c..80e8ca013e44 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -17,172 +17,6 @@
410 Severn Ave., Suite 210
Annapolis MD 21403
- Linux Kernel Additions:
-
- 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
- 0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
- Remove compatibility defines for kernel versions < 2.2.x.
- Update for new 2.3.x module interface
- LK1.1.2 (March 19, 2000)
- * New PCI interface (jgarzik)
-
- LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
- - Merged with 3c575_cb.c
- - Don't set RxComplete in boomerang interrupt enable reg
- - spinlock in vortex_timer to protect mdio functions
- - disable local interrupts around call to vortex_interrupt in
- vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
- - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
- - In vortex_start_xmit(), move the lock to _after_ we've altered
- vp->cur_tx and vp->tx_full. This defeats the race between
- vortex_start_xmit() and vortex_interrupt which was identified
- by Bogdan Costescu.
- - Merged back support for six new cards from various sources
- - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
- insertion oops)
- - Tell it that 3c905C has NWAY for 100bT autoneg
- - Fix handling of SetStatusEnd in 'Too much work..' code, as
- per 2.3.99's 3c575_cb (Dave Hinds).
- - Split ISR into two for vortex & boomerang
- - Fix MOD_INC/DEC races
- - Handle resource allocation failures.
- - Fix 3CCFE575CT LED polarity
- - Make tx_interrupt_mitigation the default
-
- LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
- - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
- - Put vortex_info_tbl into __devinitdata
- - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
- as in the hardware.
- - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
-
- LK1.1.5 28 April 2000, andrewm
- - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
- - Some extra diagnostics
- - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
- chips usually get a Tx timeout.
- - Added extra_reset module parm
- - Replaced some inline timer manip with mod_timer
- (François romieu <Francois.Romieu@nic.fr>)
- - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
- (this came across from 3c575_cb).
-
- LK1.1.6 06 Jun 2000, andrewm
- - Backed out the PPC defines.
- - Use del_timer_sync(), mod_timer().
- - Fix wrapped ulong comparison in boomerang_rx()
- - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
- (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
- - Replace union wn3_config with BFINS/BFEXT manipulation for
- sparc64 (Pete Zaitcev, Peter Jones)
- - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
- do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
- Donald Becker)
- - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
- - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
-
- LK1.1.7 2 Jul 2000 andrewm
- - Better handling of shared IRQs
- - Reset the transmitter on a Tx reclaim error
- - Fixed crash under OOM during vortex_open() (Mark Hemment)
- - Fix Rx cessation problem during OOM (help from Mark Hemment)
- - The spinlocks around the mdio access were blocking interrupts for 300uS.
- Fix all this to use spin_lock_bh() within mdio_read/write
- - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
- have one.
- - Added 802.3x MAC-layer flow control support
-
- LK1.1.8 13 Aug 2000 andrewm
- - Ignore request_region() return value - already reserved if Cardbus.
- - Merged some additional Cardbus flags from Don's 0.99Qk
- - Some fixes for 3c556 (Fred Maciel)
- - Fix for EISA initialisation (Jan Rekorajski)
- - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
- - Fixed MII_XCVR_PWR for 3CCFE575CT
- - Added INVERT_LED_PWR, used it.
- - Backed out the extra_reset stuff
-
- LK1.1.9 12 Sep 2000 andrewm
- - Backed out the tx_reset_resume flags. It was a no-op.
- - In vortex_error, don't reset the Tx on txReclaim errors
- - In vortex_error, don't reset the Tx on maxCollisions errors.
- Hence backed out all the DownListPtr logic here.
- - In vortex_error, give Tornado cards a partial TxReset on
- maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
- - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
- - Fixed a bug where, if vp->tx_full is set when the interface
- is downed, it remains set when the interface is upped. Bad
- things happen.
-
- LK1.1.10 17 Sep 2000 andrewm
- - Added EEPROM_8BIT for 3c555 (Fred Maciel)
- - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
- - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
-
- LK1.1.11 13 Nov 2000 andrewm
- - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
-
- LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
- - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
- - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
- - Added extended issue_and_wait for the 3c905CX.
- - Look for an MII on PHY index 24 first (3c905CX oddity).
- - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
- - Don't free skbs we don't own on oom path in vortex_open().
-
- LK1.1.13 27 Jan 2001
- - Added explicit `medialock' flag so we can truly
- lock the media type down with `options'.
- - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
- - Added and used EEPROM_NORESET for 3c556B PM resumes.
- - Fixed leakage of vp->rx_ring.
- - Break out separate HAS_HWCKSM device capability flag.
- - Kill vp->tx_full (ANK)
- - Merge zerocopy fragment handling (ANK?)
-
- LK1.1.14 15 Feb 2001
- - Enable WOL. Can be turned on with `enable_wol' module option.
- - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
- - If a device's internalconfig register reports it has NWAY,
- use it, even if autoselect is enabled.
-
- LK1.1.15 6 June 2001 akpm
- - Prevent double counting of received bytes (Lars Christensen)
- - Add ethtool support (jgarzik)
- - Add module parm descriptions (Andrzej M. Krzysztofowicz)
- - Implemented alloc_etherdev() API
- - Special-case the 'Tx error 82' message.
-
- LK1.1.16 18 July 2001 akpm
- - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
- - Lessen verbosity of bootup messages
- - Fix WOL - use new PM API functions.
- - Use netif_running() instead of vp->open in suspend/resume.
- - Don't reset the interface logic on open/close/rmmod. It upsets
- autonegotiation, and hence DHCP (from 0.99T).
- - Back out EEPROM_NORESET flag because of the above (we do it for all
- NICs).
- - Correct 3c982 identification string
- - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
- clash.
-
- LK1.1.17 18Dec01 akpm
- - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
- And it has NWAY.
- - Mask our advertised modes (vp->advertising) with our capabilities
- (MII reg5) when deciding which duplex mode to use.
- - Add `global_options' as default for options[]. Ditto global_enable_wol,
- global_full_duplex.
-
- LK1.1.18 01Jul02 akpm
- - Fix for undocumented transceiver power-up bit on some 3c566B's
- (Donald Becker, Rahul Karnik)
-
- - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
- - Also see Documentation/networking/vortex.txt
-
- LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
- - EISA sysfs integration.
*/
/*
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index d2150baa7e35..1428bb7715af 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1916,7 +1916,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
regs = ioremap(pciaddr, CP_REGS_SIZE);
if (!regs) {
rc = -EIO;
- dev_err(&pdev->dev, "Cannot map PCI MMIO (%lx@%lx)\n",
+ dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
(unsigned long long)pci_resource_len(pdev, 1),
(unsigned long long)pciaddr);
goto err_out_res;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index cd9718512d1c..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1709,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
unsigned int entry;
unsigned int len = skb->len;
+ unsigned long flags;
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % NUM_TX_DESC;
@@ -1725,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
return 0;
}
- spin_lock_irq(&tp->lock);
+ spin_lock_irqsave(&tp->lock, flags);
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
@@ -1736,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
netif_stop_queue (dev);
- spin_unlock_irq(&tp->lock);
+ spin_unlock_irqrestore(&tp->lock, flags);
if (netif_msg_tx_queued(tp))
printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 64b6a72b4f6a..db73de0d2511 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 87f94d939ff8..61b3754f50ff 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
for (i = 0; i < numdummies && !err; i++)
err = dummy_init_one(i);
if (err) {
+ i--;
while (--i >= 0)
dummy_free_one(i);
}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3042d33e2d4d..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -68,7 +68,6 @@
#ifdef NETIF_F_TSO
#include <net/checksum.h>
#endif
-#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
@@ -111,6 +110,9 @@ struct e1000_adapter;
#define E1000_MIN_RXD 80
#define E1000_MAX_82544_RXD 4096
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
@@ -143,6 +145,7 @@ struct e1000_adapter;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_ICH8_APME 0x0004
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
@@ -254,7 +257,6 @@ struct e1000_adapter {
spinlock_t tx_queue_lock;
#endif
atomic_t irq_sem;
- struct work_struct watchdog_task;
struct work_struct reset_task;
uint8_t fc_autoneg;
@@ -339,8 +341,14 @@ struct e1000_adapter {
#ifdef NETIF_F_TSO
boolean_t tso_force;
#endif
+ boolean_t smart_power_down; /* phy smart power down */
+ unsigned long flags;
};
+enum e1000_state_t {
+ __E1000_DRIVER_TESTING,
+ __E1000_RESETTING,
+};
/* e1000_main.c */
extern char e1000_driver_name[];
@@ -348,6 +356,7 @@ extern char e1000_driver_version[];
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d19664891768..88a82ba88f57 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -109,7 +109,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
-
+ if (hw->phy_type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -203,11 +204,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* reset the link */
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_reset(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
return 0;
@@ -254,10 +253,9 @@ e1000_set_pauseparam(struct net_device *netdev,
hw->original_fc = hw->fc;
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
} else
return ((hw->media_type == e1000_media_type_fiber) ?
@@ -279,10 +277,9 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
return 0;
}
@@ -577,6 +574,7 @@ e1000_get_drvinfo(struct net_device *netdev,
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
+ case e1000_ich8lan:
sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
@@ -631,6 +629,9 @@ e1000_set_ringparam(struct net_device *netdev,
tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
if (netif_running(adapter->netdev))
e1000_down(adapter);
@@ -691,9 +692,11 @@ e1000_set_ringparam(struct net_device *netdev,
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
if ((err = e1000_up(adapter)))
- return err;
+ goto err_setup;
}
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
return 0;
err_setup_tx:
e1000_free_all_rx_resources(adapter);
@@ -701,6 +704,8 @@ err_setup_rx:
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
e1000_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
@@ -754,6 +759,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
toggle = 0x7FFFF3FF;
break;
case e1000_82573:
+ case e1000_ich8lan:
toggle = 0x7FFFF033;
break;
default:
@@ -773,11 +779,12 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* restore previous status */
E1000_WRITE_REG(&adapter->hw, STATUS, before);
-
- REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -790,20 +797,22 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
- REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
+ before = (adapter->hw.mac_type == e1000_ich8lan ?
+ 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (adapter->hw.mac_type >= e1000_82543) {
- REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
+ REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ if (adapter->hw.mac_type != e1000_ich8lan)
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
-
- for (i = 0; i < E1000_RAR_ENTRIES; i++) {
- REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
- 0xFFFFFFFF);
+ value = (adapter->hw.mac_type == e1000_ich8lan ?
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
@@ -817,7 +826,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
- for (i = 0; i < E1000_MC_TBL_SIZE; i++)
+ value = (adapter->hw.mac_type == e1000_ich8lan ?
+ E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+ for (i = 0; i < value; i++)
REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
*data = 0;
@@ -889,6 +900,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Test each interrupt */
for (; i < 10; i++) {
+ if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
+ continue;
/* Interrupt to test */
mask = 1 << i;
@@ -1246,18 +1259,33 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
} else if (adapter->hw.phy_type == e1000_phy_gg82563) {
e1000_write_phy_reg(&adapter->hw,
GG82563_PHY_KMRN_MODE_CTRL,
- 0x1CE);
+ 0x1CC);
}
- /* force 1000, set loopback */
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
- /* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->hw.phy_type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ }
if (adapter->hw.media_type == e1000_media_type_copper &&
adapter->hw.phy_type == e1000_phy_m88) {
@@ -1317,6 +1345,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
+ case e1000_ich8lan:
return e1000_integrated_phy_loopback(adapter);
break;
@@ -1568,6 +1597,7 @@ e1000_diag_test(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
boolean_t if_running = netif_running(netdev);
+ set_bit(__E1000_DRIVER_TESTING, &adapter->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -1582,7 +1612,8 @@ e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
if (if_running)
- e1000_down(adapter);
+ /* indicate we're in test mode */
+ dev_close(netdev);
else
e1000_reset(adapter);
@@ -1607,8 +1638,9 @@ e1000_diag_test(struct net_device *netdev,
adapter->hw.autoneg = autoneg;
e1000_reset(adapter);
+ clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
if (if_running)
- e1000_up(adapter);
+ dev_open(netdev);
} else {
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1619,6 +1651,8 @@ e1000_diag_test(struct net_device *netdev,
data[1] = 0;
data[2] = 0;
data[3] = 0;
+
+ clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
}
msleep_interruptible(4 * 1000);
}
@@ -1778,21 +1812,18 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
- } else if (adapter->hw.mac_type < e1000_82573) {
- E1000_WRITE_REG(&adapter->hw, LEDCTL,
- (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
+ } else if (adapter->hw.phy_type == e1000_phy_ife) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long) adapter;
+ }
+ mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
} else {
- E1000_WRITE_REG(&adapter->hw, LEDCTL,
- (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+ e1000_blink_led_start(&adapter->hw);
msleep_interruptible(data * 1000);
}
@@ -1807,10 +1838,8 @@ static int
e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- }
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 3959039b16ec..583518ae49ce 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -101,7 +101,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset,
#define E1000_WRITE_REG_IO(a, reg, val) \
e1000_write_reg_io((a), E1000_##reg, val)
-static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw);
+static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+ uint16_t duplex);
static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
/* IGP cable length table */
@@ -156,6 +157,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
hw->phy_type = e1000_phy_igp;
break;
}
+ case IGP03E1000_E_PHY_ID:
+ hw->phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ hw->phy_type = e1000_phy_ife;
+ break;
case GG82563_E_PHY_ID:
if (hw->mac_type == e1000_80003es2lan) {
hw->phy_type = e1000_phy_gg82563;
@@ -332,6 +341,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82541EI:
case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
hw->mac_type = e1000_82541;
break;
case E1000_DEV_ID_82541ER:
@@ -341,6 +351,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
hw->mac_type = e1000_82541_rev_2;
break;
case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
hw->mac_type = e1000_82547;
break;
case E1000_DEV_ID_82547GI:
@@ -354,6 +365,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82572EI_COPPER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_82572EI:
hw->mac_type = e1000_82572;
break;
case E1000_DEV_ID_82573E:
@@ -361,16 +373,29 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82573L:
hw->mac_type = e1000_82573;
break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->mac_type = e1000_80003es2lan;
break;
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ hw->mac_type = e1000_ich8lan;
+ break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
}
switch(hw->mac_type) {
+ case e1000_ich8lan:
+ hw->swfwhw_semaphore_present = TRUE;
+ hw->asf_firmware_present = TRUE;
+ break;
case e1000_80003es2lan:
hw->swfw_sync_present = TRUE;
/* fall through */
@@ -423,6 +448,7 @@ e1000_set_media_type(struct e1000_hw *hw)
case e1000_82542_rev2_1:
hw->media_type = e1000_media_type_fiber;
break;
+ case e1000_ich8lan:
case e1000_82573:
/* The STATUS_TBIMODE bit is reserved or reused for the this
* device.
@@ -527,6 +553,14 @@ e1000_reset_hw(struct e1000_hw *hw)
} while(timeout);
}
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+ }
+
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not effect
* the current PCI configuration. The global reset bit is self-
@@ -550,6 +584,20 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Reset is performed on a shadow of the control register */
E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
+ case e1000_ich8lan:
+ if (!hw->phy_reset_disable &&
+ e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+ /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+ * at the same time to make sure the interface between
+ * MAC and the external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+ }
+
+ e1000_get_software_flag(hw);
+ E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ msec_delay(5);
+ break;
default:
E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
@@ -591,6 +639,7 @@ e1000_reset_hw(struct e1000_hw *hw)
/* fall through */
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
case e1000_80003es2lan:
ret_val = e1000_get_auto_rd_done(hw);
if(ret_val)
@@ -633,6 +682,12 @@ e1000_reset_hw(struct e1000_hw *hw)
e1000_pci_set_mwi(hw);
}
+ if (hw->mac_type == e1000_ich8lan) {
+ uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ E1000_WRITE_REG(hw, KABGTXD, kab);
+ }
+
return E1000_SUCCESS;
}
@@ -675,9 +730,12 @@ e1000_init_hw(struct e1000_hw *hw)
/* Disabling VLAN filtering. */
DEBUGOUT("Initializing the IEEE VLAN\n");
- if (hw->mac_type < e1000_82545_rev_3)
- E1000_WRITE_REG(hw, VET, 0);
- e1000_clear_vfta(hw);
+ /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+ if (hw->mac_type != e1000_ich8lan) {
+ if (hw->mac_type < e1000_82545_rev_3)
+ E1000_WRITE_REG(hw, VET, 0);
+ e1000_clear_vfta(hw);
+ }
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if(hw->mac_type == e1000_82542_rev2_0) {
@@ -705,8 +763,14 @@ e1000_init_hw(struct e1000_hw *hw)
/* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
- for(i = 0; i < mta_size; i++)
+ if (hw->mac_type == e1000_ich8lan)
+ mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+ for(i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occuring when accessing our register space */
+ E1000_WRITE_FLUSH(hw);
+ }
/* Set the PCI priority bit correctly in the CTRL register. This
* determines if the adapter gives priority to receives, or if it
@@ -744,6 +808,10 @@ e1000_init_hw(struct e1000_hw *hw)
break;
}
+ /* More time needed for PHY to initialize */
+ if (hw->mac_type == e1000_ich8lan)
+ msec_delay(15);
+
/* Call a subroutine to configure the link and setup flow control. */
ret_val = e1000_setup_link(hw);
@@ -757,6 +825,7 @@ e1000_init_hw(struct e1000_hw *hw)
case e1000_82571:
case e1000_82572:
case e1000_82573:
+ case e1000_ich8lan:
case e1000_80003es2lan:
ctrl |= E1000_TXDCTL_COUNT_DESC;
break;
@@ -795,6 +864,7 @@ e1000_init_hw(struct e1000_hw *hw)
/* Fall through */
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
ctrl = E1000_READ_REG(hw, TXDCTL1);
ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
if(hw->mac_type >= e1000_82571)
@@ -818,6 +888,11 @@ e1000_init_hw(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs(hw);
+ /* ICH8 No-snoop bits are opposite polarity.
+ * Set to snoop by default after reset. */
+ if (hw->mac_type == e1000_ich8lan)
+ e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
@@ -905,6 +980,7 @@ e1000_setup_link(struct e1000_hw *hw)
*/
if (hw->fc == e1000_fc_default) {
switch (hw->mac_type) {
+ case e1000_ich8lan:
case e1000_82573:
hw->fc = e1000_fc_full;
break;
@@ -971,9 +1047,12 @@ e1000_setup_link(struct e1000_hw *hw)
*/
DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
- E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
- E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+ /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+ if (hw->mac_type != e1000_ich8lan) {
+ E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ }
E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
@@ -1237,12 +1316,13 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* Wait 10ms for MAC to configure PHY from eeprom settings */
msec_delay(15);
-
+ if (hw->mac_type != e1000_ich8lan) {
/* Configure activity LED after PHY reset */
led_ctrl = E1000_READ_REG(hw, LEDCTL);
led_ctrl &= IGP_ACTIVITY_LED_MASK;
led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+ }
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, FALSE);
@@ -1478,8 +1558,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Enable Pass False Carrier on the PHY */
- phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
phy_data);
@@ -1561,28 +1640,40 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
if(hw->disable_polarity_correction == 1)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
- return ret_val;
-
- /* Force TX_CLK in the Extended PHY Specific Control Register
- * to 25MHz clock.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if(ret_val)
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
return ret_val;
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
if (hw->phy_revision < M88E1011_I_REV_4) {
- /* Configure Master and Slave downshift values */
- phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
- phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
- ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if(ret_val)
- return ret_val;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
}
/* SW Reset the PHY so all changes take effect */
@@ -1620,6 +1711,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
if(hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /* IFE phy only supports 10/100 */
+ if (hw->phy_type == e1000_phy_ife)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if(ret_val) {
@@ -1717,6 +1812,26 @@ e1000_setup_copper_link(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_copper_link");
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+ if (ret_val)
+ return ret_val;
+ default:
+ break;
+ }
+
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
if(ret_val)
@@ -1724,10 +1839,8 @@ e1000_setup_copper_link(struct e1000_hw *hw)
switch (hw->mac_type) {
case e1000_80003es2lan:
- ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
- if (ret_val)
- return ret_val;
+ /* Kumeran registers are written-only */
+ reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
reg_data);
@@ -1739,6 +1852,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_copper_link_igp_setup(hw);
if(ret_val)
@@ -1803,7 +1917,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
******************************************************************************/
static int32_t
-e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
+e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
{
int32_t ret_val = E1000_SUCCESS;
uint32_t tipg;
@@ -1823,6 +1937,18 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw)
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
E1000_WRITE_REG(hw, TIPG, tipg);
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
return ret_val;
}
@@ -1847,6 +1973,14 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
E1000_WRITE_REG(hw, TIPG, tipg);
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
return ret_val;
}
@@ -1869,10 +2003,13 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- /* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
- if(ret_val)
- return ret_val;
+ if (hw->phy_type != e1000_phy_ife) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ } else
+ mii_1000t_ctrl_reg=0;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1923,6 +2060,9 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
DEBUGOUT("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ if (hw->phy_type == e1000_phy_ife) {
+ DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+ }
}
/* Check for a software override of the flow control settings, and
@@ -1984,9 +2124,11 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if(ret_val)
- return ret_val;
+ if (hw->phy_type != e1000_phy_ife) {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
return E1000_SUCCESS;
}
@@ -2089,6 +2231,18 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
+ /* Disable MDI-X support for 10/100 */
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PMC_AUTO_MDIX;
+ phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
} else {
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed or duplex are forced.
@@ -2721,8 +2875,12 @@ e1000_check_for_link(struct e1000_hw *hw)
*/
if(hw->tbi_compatibility_en) {
uint16_t speed, duplex;
- e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if(speed != SPEED_1000) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
/* If link speed is not set to gigabit speed, we do not need
* to enable TBI compatibility.
*/
@@ -2889,7 +3047,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
if (*speed == SPEED_1000)
ret_val = e1000_configure_kmrn_for_1000(hw);
else
- ret_val = e1000_configure_kmrn_for_10_100(hw);
+ ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+ ret_val = e1000_kumeran_lock_loss_workaround(hw);
if (ret_val)
return ret_val;
}
@@ -3079,6 +3243,9 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
DEBUGFUNC("e1000_swfw_sync_acquire");
+ if (hw->swfwhw_semaphore_present)
+ return e1000_get_software_flag(hw);
+
if (!hw->swfw_sync_present)
return e1000_get_hw_eeprom_semaphore(hw);
@@ -3118,6 +3285,11 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
DEBUGFUNC("e1000_swfw_sync_release");
+ if (hw->swfwhw_semaphore_present) {
+ e1000_release_software_flag(hw);
+ return;
+ }
+
if (!hw->swfw_sync_present) {
e1000_put_hw_eeprom_semaphore(hw);
return;
@@ -3160,7 +3332,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3299,7 +3472,8 @@ e1000_write_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3514,7 +3688,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH(hw);
if (hw->mac_type >= e1000_82571)
- msec_delay(10);
+ msec_delay_irq(10);
e1000_swfw_sync_release(hw, swfw);
} else {
/* Read the Extended Device Control Register, assert the PHY_RESET_DIR
@@ -3544,6 +3718,12 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
ret_val = e1000_get_phy_cfg_done(hw);
e1000_release_software_semaphore(hw);
+ if ((hw->mac_type == e1000_ich8lan) &&
+ (hw->phy_type == e1000_phy_igp_3)) {
+ ret_val = e1000_init_lcd_from_nvm(hw);
+ if (ret_val)
+ return ret_val;
+ }
return ret_val;
}
@@ -3572,9 +3752,11 @@ e1000_phy_reset(struct e1000_hw *hw)
case e1000_82541_rev_2:
case e1000_82571:
case e1000_82572:
+ case e1000_ich8lan:
ret_val = e1000_phy_hw_reset(hw);
if(ret_val)
return ret_val;
+
break;
default:
ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
@@ -3597,11 +3779,120 @@ e1000_phy_reset(struct e1000_hw *hw)
}
/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+ int32_t reg;
+ uint16_t phy_data;
+ int32_t retry = 0;
+
+ DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return;
+
+ do {
+ /* Disable link */
+ reg = E1000_READ_REG(hw, PHY_CTRL);
+ E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* Write VR power-down enable */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
+ IGP3_VR_CTRL_MODE_SHUT);
+
+ /* Read it back and test */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = E1000_READ_REG(hw, CTRL);
+ E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+
+ return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ int32_t reg;
+ int32_t cnt;
+ uint16_t phy_data;
+
+ if (hw->kmrn_lock_loss_workaround_disabled)
+ return E1000_SUCCESS;
+
+ /* Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouls up link
+ * stability */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ /* read once to clear */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ msec_delay_irq(5);
+ }
+ /* Disable GigE link negotiation */
+ reg = E1000_READ_REG(hw, PHY_CTRL);
+ E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* unable to acquire PCS lock */
+ return E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
* Probes the expected PHY address for known PHY IDs
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
-static int32_t
+int32_t
e1000_detect_gig_phy(struct e1000_hw *hw)
{
int32_t phy_init_status, ret_val;
@@ -3613,8 +3904,8 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
/* The 82571 firmware may still be configuring the PHY. In this
* case, we cannot access the PHY until the configuration is done. So
* we explicitly set the PHY values. */
- if(hw->mac_type == e1000_82571 ||
- hw->mac_type == e1000_82572) {
+ if (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) {
hw->phy_id = IGP01E1000_I_PHY_ID;
hw->phy_type = e1000_phy_igp_2;
return E1000_SUCCESS;
@@ -3631,7 +3922,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
/* Read the PHY ID Registers to identify which PHY is onboard. */
ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
- if(ret_val)
+ if (ret_val)
return ret_val;
hw->phy_id = (uint32_t) (phy_id_high << 16);
@@ -3669,6 +3960,12 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
case e1000_80003es2lan:
if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
break;
+ case e1000_ich8lan:
+ if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
+ if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
+ break;
default:
DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
@@ -3784,6 +4081,53 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
}
/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ int32_t ret_val;
+ uint16_t phy_data, polarity;
+
+ DEBUGFUNC("e1000_phy_ife_get_info");
+
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_info->polarity_correction =
+ (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+ IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
+
+ if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced. */
+ polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
+ IFE_PSC_FORCE_POLARITY_SHIFT;
+ }
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+ IFE_PMC_MDIX_MODE_SHIFT;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
* Get PHY information from various PHY registers fot m88 PHY only.
*
* hw - Struct containing variables accessed by shared code
@@ -3898,9 +4242,12 @@ e1000_phy_get_info(struct e1000_hw *hw,
return -E1000_ERR_CONFIG;
}
- if(hw->phy_type == e1000_phy_igp ||
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2)
return e1000_phy_igp_get_info(hw, phy_info);
+ else if (hw->phy_type == e1000_phy_ife)
+ return e1000_phy_ife_get_info(hw, phy_info);
else
return e1000_phy_m88_get_info(hw, phy_info);
}
@@ -4049,6 +4396,35 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom->use_eerd = TRUE;
eeprom->use_eewr = FALSE;
break;
+ case e1000_ich8lan:
+ {
+ int32_t i = 0;
+ uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+
+ eeprom->type = e1000_eeprom_ich8;
+ eeprom->use_eerd = FALSE;
+ eeprom->use_eewr = FALSE;
+ eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+ /* Zero the shadow RAM structure. But don't load it from NVM
+ * so as to save time for driver init */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = FALSE;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
+ hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
+ ICH8_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
+ hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
+ hw->flash_bank_size /= 2 * sizeof(uint16_t);
+
+ break;
+ }
default:
break;
}
@@ -4469,7 +4845,10 @@ e1000_read_eeprom(struct e1000_hw *hw,
return ret_val;
}
- if(eeprom->type == e1000_eeprom_spi) {
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+ if (eeprom->type == e1000_eeprom_spi) {
uint16_t word_in;
uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -4636,7 +5015,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
- if(hw->mac_type == e1000_82573) {
+ if (hw->mac_type == e1000_ich8lan)
+ return FALSE;
+
+ if (hw->mac_type == e1000_82573) {
eecd = E1000_READ_REG(hw, EECD);
/* Isolate bits 15 & 16 */
@@ -4686,8 +5068,22 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
}
}
- for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
- if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Drivers must allocate the shadow ram structure for the
+ * EEPROM checksum to be updated. Otherwise, this bit as well
+ * as the checksum must both be set correctly for this
+ * validation to pass.
+ */
+ e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+ if ((eeprom_data & 0x40) == 0) {
+ eeprom_data |= 0x40;
+ e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4713,6 +5109,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
int32_t
e1000_update_eeprom_checksum(struct e1000_hw *hw)
{
+ uint32_t ctrl_ext;
uint16_t checksum = 0;
uint16_t i, eeprom_data;
@@ -4731,6 +5128,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
return -E1000_ERR_EEPROM;
} else if (hw->eeprom.type == e1000_eeprom_flash) {
e1000_commit_shadow_ram(hw);
+ } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+ e1000_commit_shadow_ram(hw);
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after next adapter reset. */
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ msec_delay(10);
}
return E1000_SUCCESS;
}
@@ -4770,6 +5175,9 @@ e1000_write_eeprom(struct e1000_hw *hw,
if(eeprom->use_eewr == TRUE)
return e1000_write_eeprom_eewr(hw, offset, words, data);
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_write_eeprom_ich8(hw, offset, words, data);
+
/* Prepare the EEPROM for writing */
if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
return -E1000_ERR_EEPROM;
@@ -4957,11 +5365,17 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
uint32_t flop = 0;
uint32_t i = 0;
int32_t error = E1000_SUCCESS;
-
- /* The flop register will be used to determine if flash type is STM */
- flop = E1000_READ_REG(hw, FLOP);
+ uint32_t old_bank_offset = 0;
+ uint32_t new_bank_offset = 0;
+ uint32_t sector_retries = 0;
+ uint8_t low_byte = 0;
+ uint8_t high_byte = 0;
+ uint8_t temp_byte = 0;
+ boolean_t sector_write_failed = FALSE;
if (hw->mac_type == e1000_82573) {
+ /* The flop register will be used to determine if flash type is STM */
+ flop = E1000_READ_REG(hw, FLOP);
for (i=0; i < attempts; i++) {
eecd = E1000_READ_REG(hw, EECD);
if ((eecd & E1000_EECD_FLUPD) == 0) {
@@ -4995,6 +5409,106 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
}
}
+ if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written */
+ if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
+ new_bank_offset = hw->flash_bank_size * 2;
+ old_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 1);
+ } else {
+ old_bank_offset = hw->flash_bank_size * 2;
+ new_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 0);
+ }
+
+ do {
+ sector_write_failed = FALSE;
+ /* Loop for every byte in the shadow RAM,
+ * which is in units of words. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM */
+ if (hw->eeprom_shadow_ram[i].modified == TRUE) {
+ low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &temp_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset,
+ low_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ high_byte =
+ (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &temp_byte);
+ udelay(100);
+ } else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &low_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &high_byte);
+ }
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress */
+ if (i == E1000_ICH8_NVM_SIG_WORD)
+ high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset + 1, high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+
+ if (sector_write_failed == FALSE) {
+ /* Clear the now not used entry in the cache */
+ hw->eeprom_shadow_ram[i].modified = FALSE;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed. */
+ if (sector_write_failed == FALSE) {
+ /* Finally validate the new segment by setting bits 15:14
+ * to 10b in word 0x13. This can be done without an
+ * erase because these bits are 11b to start with and we
+ * only need to change bit 14 to 0b. */
+ e1000_read_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ &high_byte);
+ high_byte &= 0xBF;
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase */
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset,
+ 0);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = TRUE;
+ }
+ } while (++sector_retries < 10 && sector_write_failed == TRUE);
+ }
+
return error;
}
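
Editor's note: the bank-swap commit above copies the shadow RAM out to flash one byte at a time, while the shadow RAM itself is indexed in words. The following standalone sketch only illustrates the (i << 1) word-to-byte offset arithmetic used when placing each word in the new bank; the bank size and signature word index are hypothetical placeholders (the driver takes them from hw->flash_bank_size and E1000_ICH8_NVM_SIG_WORD).

#include <stdint.h>
#include <stdio.h>

/* Illustration only: map shadow-RAM word index i to the two byte offsets
 * used when copying that word into a flash bank, mirroring the arithmetic
 * in e1000_commit_shadow_ram(). flash_bank_size is in words, so a bank
 * spans flash_bank_size * 2 bytes. */
static void shadow_word_to_flash_bytes(uint32_t i, uint32_t flash_bank_size,
                                       int use_second_bank,
                                       uint32_t *low_off, uint32_t *high_off)
{
        uint32_t bank_offset = use_second_bank ? flash_bank_size * 2 : 0;

        *low_off  = (i << 1) + bank_offset;      /* low byte of word i  */
        *high_off = (i << 1) + bank_offset + 1;  /* high byte of word i */
}

int main(void)
{
        uint32_t lo, hi;

        /* Hypothetical 2K-word bank; 0x13 is the signature word index. */
        shadow_word_to_flash_bytes(0x13, 0x800, 1, &lo, &hi);
        printf("low byte at 0x%X, high byte at 0x%X\n", lo, hi);
        return 0;
}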
@@ -5102,15 +5616,19 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
* the other port. */
if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
rar_num -= 1;
+ if (hw->mac_type == e1000_ich8lan)
+ rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
/* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n");
for(i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
}
-#if 0
/******************************************************************************
* Updates the MAC's list of multicast addresses.
*
@@ -5145,6 +5663,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
/* Clear RAR[1-15] */
DEBUGOUT(" Clearing RAR[1-15]\n");
num_rar_entry = E1000_RAR_ENTRIES;
+ if (hw->mac_type == e1000_ich8lan)
+ num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN;
/* Reserve a spot for the Locally Administered Address to work around
* an 82571 issue in which a reset on one port will reload the MAC on
* the other port. */
@@ -5153,14 +5673,19 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
for(i = rar_used_count; i < num_rar_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Clear the MTA */
DEBUGOUT(" Clearing MTA\n");
num_mta_entry = E1000_NUM_MTA_REGISTERS;
+ if (hw->mac_type == e1000_ich8lan)
+ num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
for(i = 0; i < num_mta_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Add the new addresses */
@@ -5194,7 +5719,6 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
}
DEBUGOUT("MC Update Complete\n");
}
-#endif /* 0 */
/******************************************************************************
* Hashes an address to determine its location in the multicast table
@@ -5217,24 +5741,46 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
* LSB MSB
*/
case 0:
- /* [47:36] i.e. 0x563 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [47:38] i.e. 0x158 for above example address */
+ hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
+ } else {
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ }
break;
case 1:
- /* [46:35] i.e. 0xAC6 for above example address */
- hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [46:37] i.e. 0x2B1 for above example address */
+ hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
+ } else {
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+ }
break;
case 2:
- /* [45:34] i.e. 0x5D8 for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ if (hw->mac_type == e1000_ich8lan) {
+ /*[45:36] i.e. 0x163 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+ } else {
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ }
break;
case 3:
- /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [43:34] i.e. 0x18D for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+ } else {
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+ }
break;
}
hash_value &= 0xFFF;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_value &= 0x3FF;
return hash_value;
}
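
Editor's note: the ich8lan cases above narrow the hash to 10 bits because that part has a smaller multicast table. The standalone sketch below restates the bit selection from the switch cases for both the default and the ich8lan paths; the example address is arbitrary and chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Restatement of e1000_hash_mc_addr()'s bit selection for filter types 0-3.
 * ich8lan keeps a 10-bit hash, other MACs a 12-bit hash. */
static uint16_t mc_hash(const uint8_t *mc_addr, int filter_type, int is_ich8lan)
{
        uint16_t hash = 0;

        switch (filter_type) {
        case 0:
                hash = is_ich8lan ?
                        ((mc_addr[4] >> 6) | ((uint16_t)mc_addr[5] << 2)) :
                        ((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4));
                break;
        case 1:
                hash = is_ich8lan ?
                        ((mc_addr[4] >> 5) | ((uint16_t)mc_addr[5] << 3)) :
                        ((mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5));
                break;
        case 2:
                hash = is_ich8lan ?
                        ((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4)) :
                        ((mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6));
                break;
        case 3:
                hash = is_ich8lan ?
                        ((mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6)) :
                        (mc_addr[4] | ((uint16_t)mc_addr[5] << 8));
                break;
        }
        return hash & (is_ich8lan ? 0x3FF : 0xFFF);
}

int main(void)
{
        uint8_t addr[6] = { 0x01, 0x00, 0x5E, 0x00, 0x56, 0x65 }; /* arbitrary */

        printf("default hash: 0x%03X, ich8lan hash: 0x%03X\n",
               mc_hash(addr, 0, 0), mc_hash(addr, 0, 1));
        return 0;
}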
@@ -5262,6 +5808,8 @@ e1000_mta_set(struct e1000_hw *hw,
* register are determined by the lower 5 bits of the value.
*/
hash_reg = (hash_value >> 5) & 0x7F;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_reg &= 0x1F;
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -5275,9 +5823,12 @@ e1000_mta_set(struct e1000_hw *hw,
if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5334,7 +5885,9 @@ e1000_rar_set(struct e1000_hw *hw,
}
E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
}
/******************************************************************************
@@ -5351,12 +5904,18 @@ e1000_write_vfta(struct e1000_hw *hw,
{
uint32_t temp;
- if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5373,6 +5932,9 @@ e1000_clear_vfta(struct e1000_hw *hw)
uint32_t vfta_offset = 0;
uint32_t vfta_bit_in_reg = 0;
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
if (hw->mac_type == e1000_82573) {
if (hw->mng_cookie.vlan_id != 0) {
/* The VFTA is a 4096b bit-field, each identifying a single VLAN
@@ -5392,6 +5954,7 @@ e1000_clear_vfta(struct e1000_hw *hw)
* manageability unit */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
}
}
@@ -5421,9 +5984,18 @@ e1000_id_led_init(struct e1000_hw * hw)
DEBUGOUT("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
- if((eeprom_data== ID_LED_RESERVED_0000) ||
- (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT;
- for(i = 0; i < 4; i++) {
+
+ if ((hw->mac_type == e1000_82573) &&
+ (eeprom_data == ID_LED_RESERVED_82573))
+ eeprom_data = ID_LED_DEFAULT_82573;
+ else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ if (hw->mac_type == e1000_ich8lan)
+ eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+ else
+ eeprom_data = ID_LED_DEFAULT;
+ }
+ for (i = 0; i < 4; i++) {
temp = (eeprom_data >> (i << 2)) & led_mask;
switch(temp) {
case ID_LED_ON1_DEF2:
@@ -5519,6 +6091,44 @@ e1000_setup_led(struct e1000_hw *hw)
}
/******************************************************************************
+ * Used on 82571 and later Si that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init()
+ * Call e1000_cleanup_led() to stop blinking
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_blink_led_start(struct e1000_hw *hw)
+{
+ int16_t i;
+ uint32_t ledctl_blink = 0;
+
+ DEBUGFUNC("e1000_id_led_blink_on");
+
+ if (hw->mac_type < e1000_82571) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+ ledctl_blink = hw->ledctl_mode2;
+ for (i=0; i < 4; i++)
+ if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+ }
+
+ E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
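
Editor's note: the copper branch of e1000_blink_led_start() above derives the blink mask by scanning the four 8-bit LED mode fields in ledctl_mode2. The standalone sketch below mirrors that loop; the E1000_LEDCTL_* values are placeholders for illustration only (the real definitions live in e1000_hw.h).

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration; see e1000_hw.h for the real ones. */
#define LEDCTL_MODE_LED_ON   0x0E
#define LEDCTL_LED0_BLINK    0x00000080

/* Mirror of the loop in e1000_blink_led_start(): set the blink bit in every
 * per-LED field whose mode is "LED on". */
static uint32_t make_blink_mask(uint32_t ledctl_mode2)
{
        uint32_t ledctl_blink = ledctl_mode2;
        int i;

        for (i = 0; i < 4; i++)
                if (((ledctl_mode2 >> (i * 8)) & 0xFF) == LEDCTL_MODE_LED_ON)
                        ledctl_blink |= LEDCTL_LED0_BLINK << (i * 8);

        return ledctl_blink;
}

int main(void)
{
        /* Hypothetical ledctl_mode2 value with LED0 and LED2 "on". */
        printf("ledctl_blink = 0x%08X\n", make_blink_mask(0x000E000EU));
        return 0;
}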
+/******************************************************************************
* Restores the saved state of the SW controllable LED.
*
* hw - Struct containing variables accessed by shared code
@@ -5548,6 +6158,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
return ret_val;
/* Fall Through */
default:
+ if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ break;
+ }
/* Restore LEDCTL settings */
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
break;
@@ -5592,7 +6206,10 @@ e1000_led_on(struct e1000_hw *hw)
/* Clear SW Definable Pin 0 to turn on the LED */
ctrl &= ~E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
- } else if(hw->media_type == e1000_media_type_copper) {
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+ } else if (hw->media_type == e1000_media_type_copper) {
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
return E1000_SUCCESS;
}
@@ -5640,7 +6257,10 @@ e1000_led_off(struct e1000_hw *hw)
/* Set SW Definable Pin 0 to turn off the LED */
ctrl |= E1000_CTRL_SWDPIN0;
ctrl |= E1000_CTRL_SWDPIO0;
- } else if(hw->media_type == e1000_media_type_copper) {
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ } else if (hw->media_type == e1000_media_type_copper) {
E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
return E1000_SUCCESS;
}
@@ -5678,12 +6298,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, XOFFRXC);
temp = E1000_READ_REG(hw, XOFFTXC);
temp = E1000_READ_REG(hw, FCRUC);
+
+ if (hw->mac_type != e1000_ich8lan) {
temp = E1000_READ_REG(hw, PRC64);
temp = E1000_READ_REG(hw, PRC127);
temp = E1000_READ_REG(hw, PRC255);
temp = E1000_READ_REG(hw, PRC511);
temp = E1000_READ_REG(hw, PRC1023);
temp = E1000_READ_REG(hw, PRC1522);
+ }
+
temp = E1000_READ_REG(hw, GPRC);
temp = E1000_READ_REG(hw, BPRC);
temp = E1000_READ_REG(hw, MPRC);
@@ -5703,12 +6327,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, TOTH);
temp = E1000_READ_REG(hw, TPR);
temp = E1000_READ_REG(hw, TPT);
+
+ if (hw->mac_type != e1000_ich8lan) {
temp = E1000_READ_REG(hw, PTC64);
temp = E1000_READ_REG(hw, PTC127);
temp = E1000_READ_REG(hw, PTC255);
temp = E1000_READ_REG(hw, PTC511);
temp = E1000_READ_REG(hw, PTC1023);
temp = E1000_READ_REG(hw, PTC1522);
+ }
+
temp = E1000_READ_REG(hw, MPTC);
temp = E1000_READ_REG(hw, BPTC);
@@ -5731,6 +6359,9 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, IAC);
temp = E1000_READ_REG(hw, ICRXOC);
+
+ if (hw->mac_type == e1000_ich8lan) return;
+
temp = E1000_READ_REG(hw, ICRXPTC);
temp = E1000_READ_REG(hw, ICRXATC);
temp = E1000_READ_REG(hw, ICTXPTC);
@@ -5911,6 +6542,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
hw->bus_width = e1000_bus_width_pciex_1;
break;
case e1000_82571:
+ case e1000_ich8lan:
case e1000_80003es2lan:
hw->bus_type = e1000_bus_type_pci_express;
hw->bus_speed = e1000_bus_speed_2500;
@@ -5948,8 +6580,6 @@ e1000_get_bus_info(struct e1000_hw *hw)
break;
}
}
-
-#if 0
/******************************************************************************
* Reads a value from one of the devices registers using port I/O (as opposed
* memory mapped I/O). Only 82544 and newer devices support port I/O.
@@ -5967,7 +6597,6 @@ e1000_read_reg_io(struct e1000_hw *hw,
e1000_io_write(hw, io_addr, offset);
return e1000_io_read(hw, io_data);
}
-#endif /* 0 */
/******************************************************************************
* Writes a value to one of the devices registers using port I/O (as opposed to
@@ -6012,8 +6641,6 @@ e1000_get_cable_length(struct e1000_hw *hw,
{
int32_t ret_val;
uint16_t agc_value = 0;
- uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- uint16_t max_agc = 0;
uint16_t i, phy_data;
uint16_t cable_length;
@@ -6086,6 +6713,8 @@ e1000_get_cable_length(struct e1000_hw *hw,
break;
}
} else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ uint16_t cur_agc_value;
+ uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
{IGP01E1000_PHY_AGC_A,
IGP01E1000_PHY_AGC_B,
@@ -6098,23 +6727,23 @@ e1000_get_cable_length(struct e1000_hw *hw,
if(ret_val)
return ret_val;
- cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
- /* Array bound check. */
- if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
- (cur_agc == 0))
+ /* Value bound check. */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0))
return -E1000_ERR_PHY;
- agc_value += cur_agc;
+ agc_value += cur_agc_value;
/* Update minimal AGC value. */
- if(min_agc > cur_agc)
- min_agc = cur_agc;
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
}
/* Remove the minimal AGC result for length < 50m */
- if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
- agc_value -= min_agc;
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
/* Get the average length of the remaining 3 channels */
agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
@@ -6130,7 +6759,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
IGP01E1000_AGC_RANGE) : 0;
*max_length = e1000_igp_cable_length_table[agc_value] +
IGP01E1000_AGC_RANGE;
- } else if (hw->phy_type == e1000_phy_igp_2) {
+ } else if (hw->phy_type == e1000_phy_igp_2 ||
+ hw->phy_type == e1000_phy_igp_3) {
+ uint16_t cur_agc_index, max_agc_index = 0;
+ uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
{IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -6145,19 +6777,27 @@ e1000_get_cable_length(struct e1000_hw *hw,
/* Getting bits 15:9, which represent the combination of coarse and
* fine gain values. The result is a number that can be put into
* the lookup table to obtain the approximate cable length. */
- cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
- /* Remove min & max AGC values from calculation. */
- if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc])
- min_agc = cur_agc;
- if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc])
- max_agc = cur_agc;
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
- agc_value += e1000_igp_2_cable_length_table[cur_agc];
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
}
- agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]);
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
@@ -6203,7 +6843,8 @@ e1000_check_polarity(struct e1000_hw *hw,
return ret_val;
*polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
M88E1000_PSSR_REV_POLARITY_SHIFT;
- } else if(hw->phy_type == e1000_phy_igp ||
+ } else if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
/* Read the Status register to check the speed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
@@ -6229,6 +6870,13 @@ e1000_check_polarity(struct e1000_hw *hw,
* 100 Mbps this bit is always 0) */
*polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
}
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >>
+ IFE_PESC_POLARITY_REVERSED_SHIFT;
}
return E1000_SUCCESS;
}
@@ -6256,7 +6904,8 @@ e1000_check_downshift(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_downshift");
- if(hw->phy_type == e1000_phy_igp ||
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -6273,6 +6922,9 @@ e1000_check_downshift(struct e1000_hw *hw)
hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ /* e1000_phy_ife supports 10/100 speed only */
+ hw->speed_downgraded = FALSE;
}
return E1000_SUCCESS;
@@ -6317,7 +6969,9 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
if(speed == SPEED_1000) {
- e1000_get_cable_length(hw, &min_length, &max_length);
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
min_length >= e1000_igp_cable_length_50) {
@@ -6525,20 +7179,27 @@ static int32_t
e1000_set_d3_lplu_state(struct e1000_hw *hw,
boolean_t active)
{
+ uint32_t phy_ctrl = 0;
int32_t ret_val;
uint16_t phy_data;
DEBUGFUNC("e1000_set_d3_lplu_state");
- if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2)
+ if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+ && hw->phy_type != e1000_phy_igp_3)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
* from the lowest speeds starting from 10Mbps. The capability is used for
* Dx transitions and states */
- if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+ if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
- if(ret_val)
+ if (ret_val)
return ret_val;
+ } else if (hw->mac_type == e1000_ich8lan) {
+ /* MAC writes into PHY register based on the state transition
+ * and start auto-negotiation. SW driver can overwrite the settings
+ * in CSR PHY power control E1000_PHY_CTRL register. */
+ phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
} else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if(ret_val)
@@ -6553,11 +7214,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
if(ret_val)
return ret_val;
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data &= ~IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
phy_data);
if (ret_val)
return ret_val;
+ }
}
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
@@ -6593,17 +7259,22 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
(hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
if(hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547_rev_2) {
+ hw->mac_type == e1000_82547_rev_2) {
phy_data |= IGP01E1000_GMII_FLEX_SPD;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
if(ret_val)
return ret_val;
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data |= IGP02E1000_PM_D3_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
phy_data);
if (ret_val)
return ret_val;
+ }
}
/* When LPLU is enabled we should disable SmartSpeed */
@@ -6638,6 +7309,7 @@ static int32_t
e1000_set_d0_lplu_state(struct e1000_hw *hw,
boolean_t active)
{
+ uint32_t phy_ctrl = 0;
int32_t ret_val;
uint16_t phy_data;
DEBUGFUNC("e1000_set_d0_lplu_state");
@@ -6645,15 +7317,24 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
if(hw->mac_type <= e1000_82547_rev_2)
return E1000_SUCCESS;
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+ } else {
ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
if(ret_val)
return ret_val;
+ }
if (!active) {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
+ }
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
* Dx states where the power conservation is most important. During
@@ -6686,10 +7367,15 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
} else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+ } else {
phy_data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
+ }
/* When LPLU is enabled we should disable SmartSpeed */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
@@ -6928,8 +7614,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
length >>= 2;
/* The device driver writes the relevant command block into the ram area. */
- for (i = 0; i < length; i++)
+ for (i = 0; i < length; i++) {
E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
return E1000_SUCCESS;
}
@@ -6961,15 +7649,18 @@ e1000_mng_write_commit(
* returns - TRUE when the mode is IAMT or FALSE.
****************************************************************************/
boolean_t
-e1000_check_mng_mode(
- struct e1000_hw *hw)
+e1000_check_mng_mode(struct e1000_hw *hw)
{
uint32_t fwsm;
fwsm = E1000_READ_REG(hw, FWSM);
- if((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ if (hw->mac_type == e1000_ich8lan) {
+ if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return TRUE;
+ } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
return TRUE;
return FALSE;
@@ -7209,7 +7900,6 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
E1000_WRITE_REG(hw, CTRL, ctrl);
}
-#if 0
/***************************************************************************
*
* Enables PCI-Express master access.
@@ -7233,7 +7923,6 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
E1000_WRITE_REG(hw, CTRL, ctrl);
}
-#endif /* 0 */
/*******************************************************************************
*
@@ -7299,8 +7988,10 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
case e1000_82572:
case e1000_82573:
case e1000_80003es2lan:
- while(timeout) {
- if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
+ case e1000_ich8lan:
+ while (timeout) {
+ if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
+ break;
else msec_delay(1);
timeout--;
}
@@ -7340,7 +8031,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
switch (hw->mac_type) {
default:
- msec_delay(10);
+ msec_delay_irq(10);
break;
case e1000_80003es2lan:
/* Separate *_CFG_DONE_* bit for each port */
@@ -7523,6 +8214,13 @@ int32_t
e1000_check_phy_reset_block(struct e1000_hw *hw)
{
uint32_t manc = 0;
+ uint32_t fwsm = 0;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ fwsm = E1000_READ_REG(hw, FWSM);
+ return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+ : E1000_BLK_PHY_RESET;
+ }
if (hw->mac_type > e1000_82547_rev_2)
manc = E1000_READ_REG(hw, MANC);
@@ -7549,6 +8247,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
if((fwsm & E1000_FWSM_MODE_MASK) != 0)
return TRUE;
break;
+ case e1000_ich8lan:
+ return TRUE;
default:
break;
}
@@ -7556,4 +8256,846 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
}
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+int32_t
+e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
+{
+ uint32_t gcr_reg = 0;
+
+ DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+ if (hw->bus_type == e1000_bus_type_unknown)
+ e1000_get_bus_info(hw);
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ if (no_snoop) {
+ gcr_reg = E1000_READ_REG(hw, GCR);
+ gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+ gcr_reg |= no_snoop;
+ E1000_WRITE_REG(hw, GCR, gcr_reg);
+ }
+ if (hw->mac_type == e1000_ich8lan) {
+ uint32_t ctrl_ext;
+
+ E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
+
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_software_flag(struct e1000_hw *hw)
+{
+ int32_t timeout = PHY_CFG_TIMEOUT;
+ uint32_t extcnf_ctrl;
+
+ DEBUGFUNC("e1000_get_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("FW or HW locks the resource too long.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+void
+e1000_release_software_flag(struct e1000_hw *hw)
+{
+ uint32_t extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+ }
+
+ return;
+}
+
+/***************************************************************************
+ *
+ * Disable dynamic power down mode in the IFE PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
+{
+ uint16_t phy_data;
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_ife_disable_dynamic_power_down");
+
+ if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+ }
+
+ return ret_val;
+}
+
+/***************************************************************************
+ *
+ * Enable dynamic power down mode in the IFE PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
+{
+ uint16_t phy_data;
+ int32_t ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_ife_enable_dynamic_power_down");
+
+ if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+ }
+
+ return ret_val;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+ uint16_t *data)
+{
+ int32_t error = E1000_SUCCESS;
+ uint32_t flash_bank = 0;
+ uint32_t act_offset = 0;
+ uint32_t bank_offset = 0;
+ uint16_t word = 0;
+ uint16_t i = 0;
+
+ /* We need to know which is the valid flash bank. In the event
+ * that we didn't allocate eeprom_shadow_ram, we may not be
+ * managing flash_bank. So it cannot be trusted and needs
+ * to be updated with each read.
+ */
+ /* Value of bit 22 corresponds to the flash bank we're on. */
+ flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+ /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+ bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ for (i = 0; i < words; i++) {
+ if (hw->eeprom_shadow_ram != NULL &&
+ hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
+ data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+ } else {
+ /* The NVM part needs a byte offset, hence * 2 */
+ act_offset = bank_offset + ((offset + i) * 2);
+ error = e1000_read_ich8_word(hw, act_offset, &word);
+ if (error != E1000_SUCCESS)
+ break;
+ data[i] = word;
+ }
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
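
Editor's note: for readers unfamiliar with the shadow RAM scheme, here is a minimal standalone simulation of the read precedence implemented above: words flagged as modified come from the in-memory cache, everything else from the (here mocked) flash contents. Array sizes and values are made up for illustration; the driver uses E1000_SHADOW_RAM_WORDS and real flash reads.

#include <stdint.h>
#include <stdio.h>

#define SHADOW_WORDS 8   /* illustration only */

struct shadow_word {
        uint16_t eeprom_word;
        int modified;
};

/* Read 'words' words starting at 'offset', preferring modified shadow
 * entries over the backing (mock) flash contents, as
 * e1000_read_eeprom_ich8() does. */
static void read_words(const struct shadow_word *shadow, const uint16_t *flash,
                       uint16_t offset, uint16_t words, uint16_t *data)
{
        uint16_t i;

        for (i = 0; i < words; i++) {
                if (shadow[offset + i].modified)
                        data[i] = shadow[offset + i].eeprom_word;
                else
                        data[i] = flash[offset + i];
        }
}

int main(void)
{
        struct shadow_word shadow[SHADOW_WORDS] = { { 0, 0 } };
        uint16_t flash[SHADOW_WORDS] = { 0x1111, 0x2222, 0x3333, 0x4444 };
        uint16_t out[2];

        shadow[1].eeprom_word = 0xBEEF;   /* pending, uncommitted write */
        shadow[1].modified = 1;

        read_words(shadow, flash, 0, 2, out);
        printf("word0=0x%04X word1=0x%04X\n", out[0], out[1]);
        return 0;
}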
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register. Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->eeprom_shadow_ram. e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+ uint16_t *data)
+{
+ uint32_t i = 0;
+ int32_t error = E1000_SUCCESS;
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ /* A driver can write to the NVM only if it has eeprom_shadow_ram
+ * allocated. Subsequent reads to the modified words are read from
+ * this cached structure as well. Writes will only go into this
+ * cached structure unless it's followed by a call to
+ * e1000_update_eeprom_checksum() where it will commit the changes
+ * and clear the "modified" field.
+ */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < words; i++) {
+ if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+ hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+ hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+ } else {
+ error = -E1000_ERR_EEPROM;
+ break;
+ }
+ }
+ } else {
+ /* Drivers have the option to not allocate eeprom_shadow_ram as long
+ * as they don't perform any NVM writes. Attempting a write without it
+ * will result in this error.
+ */
+ error = -E1000_ERR_EEPROM;
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ int32_t error = E1000_ERR_EEPROM;
+ int32_t i = 0;
+
+ DEBUGFUNC("e1000_ich8_cycle_init");
+
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+ /* Check the Flash Descriptor Valid bit in the HW status register */
+ if (hsfsts.hsf_status.fldesvalid == 0) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
+ return error;
+ }
+
+ /* Clear FCERR in Hw status by writing 1 */
+ /* Clear DAEL in Hw status by writing a 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle-in-progress bit to check
+ * against in order to start a new cycle, or the FDONE bit should be
+ * changed in the hardware so that it is 1 after hardware reset, which
+ * can then be used as an indication of whether a cycle is in progress or
+ * has been completed. We should also have some software semaphore
+ * mechanism to guard FDONE or the cycle-in-progress bit so that access
+ * by two threads is serialized, or some other way to keep two threads
+ * from starting the cycle at the same time */
+
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ /* There is no cycle running at present, so we can start a cycle */
+ /* Begin by setting Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ error = E1000_SUCCESS;
+ } else {
+ /* Otherwise poll for some time so the current cycle has a chance
+ * to end before giving up. */
+ for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ error = E1000_SUCCESS;
+ break;
+ }
+ udelay(1);
+ }
+ if (error == E1000_SUCCESS) {
+ /* Successfully waited for the previous cycle to finish;
+ * now set the Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access");
+ }
+ }
+ return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ int32_t error = E1000_ERR_EEPROM;
+ uint32_t i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone == 1)
+ break;
+ udelay(1);
+ i++;
+ } while (i < timeout);
+ if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+ error = E1000_SUCCESS;
+ }
+ return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+ uint32_t size, uint16_t* data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ uint32_t flash_data = 0;
+ int32_t error = -E1000_ERR_EEPROM;
+ int32_t count = 0;
+
+ DEBUGFUNC("e1000_read_ich8_data");
+
+ if (size < 1 || size > 2 || data == 0x0 ||
+ index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ /* TODO: TBD maybe check the index against the size of flash */
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1; if so, clear it and try the whole
+ * sequence a few more times, else read in (shift in) the Flash Data0
+ * register, least significant byte first */
+ if (error == E1000_SUCCESS) {
+ flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+ if (size == 1) {
+ *data = (uint8_t)(flash_data & 0x000000FF);
+ } else if (size == 2) {
+ *data = (uint16_t)(flash_data & 0x0000FFFF);
+ }
+ break;
+ } else {
+ /* If we've gotten here, then things are probably completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
+ uint16_t data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ uint32_t flash_data = 0;
+ int32_t error = -E1000_ERR_EEPROM;
+ int32_t count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (size < 1 || size > 2 || data > size * 0xff ||
+ index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ if (size == 1)
+ flash_data = (uint32_t)data & 0x00FF;
+ else
+ flash_data = (uint32_t)data;
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+
+ /* Check if FCERR is set to 1; if so, clear it and try the whole
+ * sequence a few more times, else done */
+ error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ /* If we're here, then things are most likely completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
+{
+ int32_t status = E1000_SUCCESS;
+ uint16_t word = 0;
+
+ status = e1000_read_ich8_data(hw, index, 1, &word);
+ if (status == E1000_SUCCESS) {
+ *data = (uint8_t)word;
+ }
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
+{
+ int32_t error = E1000_SUCCESS;
+ int32_t program_retries;
+ uint8_t temp_byte;
+
+ e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e1000_read_ich8_byte(hw, index, &temp_byte);
+ if (temp_byte == byte)
+ break;
+ udelay(10);
+ e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+ }
+ if (program_retries == 100)
+ error = E1000_ERR_EEPROM;
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
+{
+ int32_t status = E1000_SUCCESS;
+ uint16_t word = (uint16_t)data;
+
+ status = e1000_write_ich8_data(hw, index, 1, word);
+
+ return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
+{
+ int32_t status = E1000_SUCCESS;
+ status = e1000_read_ich8_data(hw, index, 2, data);
+ return status;
+}
+
+/******************************************************************************
+ * Writes a word to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to write.
+ * data - The word to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
+{
+ int32_t status = E1000_SUCCESS;
+ status = e1000_write_ich8_data(hw, index, 2, data);
+ return status;
+}
+
+/******************************************************************************
+ * Erases the segment specified. Each segment is a 4K block. Segments are
+ * 0 based; segment N starts at 4096 * N + flash_reg_addr.
+ *
+ * hw - pointer to e1000_hw structure
+ * segment - 0 for first segment, 1 for second segment, etc.
+ *****************************************************************************/
+int32_t
+e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ uint32_t flash_linear_address;
+ int32_t count = 0;
+ int32_t error = E1000_ERR_EEPROM;
+ int32_t iteration, seg_size;
+ int32_t sector_size;
+ int32_t j = 0;
+ int32_t error_flag = 0;
+
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+ /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector can be
+ * calculated as = segment * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = segment * 4096
+ * 10: Error condition
+ * 11: The Hw sector size is much bigger than the size asked to
+ * erase...error condition */
+ if (hsfsts.hsf_status.berasesz == 0x0) {
+ /* Hw sector size 256 */
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256;
+ iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+ } else if (hsfsts.hsf_status.berasesz == 0x1) {
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ } else if (hsfsts.hsf_status.berasesz == 0x3) {
+ sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ } else {
+ return error;
+ }
+
+ for (j = 0; j < iteration ; j++) {
+ do {
+ count++;
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS) {
+ error_flag = 1;
+ break;
+ }
+
+ /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+ * Control */
+ hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
+ E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the block into Flash
+ * Linear address field in Flash Address. This probably needs to
+ * be calculated here based off the on-chip segment size and the
+ * software segment size assumed (4K) */
+ /* TBD */
+ flash_linear_address = segment * sector_size + j * seg_size;
+ flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+ flash_linear_address += hw->flash_base_addr;
+
+ E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, 1000000);
+ /* Check if FCERR is set to 1. If 1, clear it and try the whole
+ * sequence a few more times else Done */
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* repeat for some time before giving up */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ error_flag = 1;
+ break;
+ }
+ }
+ } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ if (error_flag == 1)
+ break;
+ }
+ if (error_flag != 1)
+ error = E1000_SUCCESS;
+ return error;
+}
+
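
Editor's note: as a standalone illustration of the per-sector address programming in the erase loop above, the sketch below mirrors the flash_linear_address expression for the 256-byte hardware sector case. The constant values and base address are hypothetical placeholders; the real ones come from e1000_hw.h and the hw struct.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration only. */
#define FLASH_SEG_SIZE_256      256
#define FLASH_SECTOR_SIZE      4096
#define FLASH_LINEAR_ADDR_MASK 0x00FFFFFF

int main(void)
{
        uint32_t segment = 1;                /* erase the second segment */
        uint32_t flash_base_addr = 0x1000;   /* hypothetical base */
        uint32_t sector_size = FLASH_SEG_SIZE_256;
        uint32_t seg_size = FLASH_SEG_SIZE_256;
        uint32_t iteration = FLASH_SECTOR_SIZE / FLASH_SEG_SIZE_256;
        uint32_t j;

        for (j = 0; j < iteration; j++) {
                /* Same expression as in e1000_erase_ich8_4k_segment(). */
                uint32_t addr = segment * sector_size + j * seg_size;

                addr &= FLASH_LINEAR_ADDR_MASK;
                addr += flash_base_addr;
                printf("iteration %2u -> FADDR 0x%08X\n", j, addr);
        }
        return 0;
}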
+/******************************************************************************
+ *
+ * Reverse duplex setting without breaking the link.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ *****************************************************************************/
+int32_t
+e1000_duplex_reversal(struct e1000_hw *hw)
+{
+ int32_t ret_val;
+ uint16_t phy_data;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data ^= MII_CR_FULL_DUPLEX;
+
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
+ ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+int32_t
+e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ uint32_t cnf_base_addr, uint32_t cnf_size)
+{
+ uint32_t ret_val = E1000_SUCCESS;
+ uint16_t word_addr, reg_data, reg_addr;
+ uint16_t i;
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (uint16_t)(cnf_base_addr << 1);
+
+ /* cnf_size is given in DWORDs */
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_get_software_flag(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+
+ e1000_release_software_flag(hw);
+ }
+
+ return ret_val;
+}
+
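
Editor's note: the loop in e1000_init_lcd_from_nvm_config_region() above treats the extended configuration region as cnf_size (data, address) word pairs, with the DWORD base address converted to a word address by the << 1. The standalone sketch below walks a made-up region to show that layout; the mock NVM contents and register numbers are purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Mock NVM: each DWORD entry is one data word followed by one PHY
         * register address word, starting at word_addr. */
        uint16_t nvm[] = { 0x1234, 0x0010,    /* write 0x1234 to PHY reg 0x10 */
                           0xABCD, 0x0019 };  /* write 0xABCD to PHY reg 0x19 */
        uint32_t cnf_base_addr = 0;           /* in DWORDs, as read from EXTCNF_CTRL */
        uint32_t cnf_size = 2;                /* number of DWORD entries */
        uint16_t word_addr = (uint16_t)(cnf_base_addr << 1);
        uint16_t i;

        for (i = 0; i < cnf_size; i++) {
                uint16_t reg_data = nvm[word_addr + i * 2];
                uint16_t reg_addr = nvm[word_addr + i * 2 + 1];

                printf("would write 0x%04X to PHY register 0x%02X\n",
                       reg_data, reg_addr);
        }
        return 0;
}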
+
+int32_t
+e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+ uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Check if SW needs to configure the PHY */
+ reg_data = E1000_READ_REG(hw, FEXTNVM);
+ if (!(reg_data & FEXTNVM_SW_CONFIG))
+ return E1000_SUCCESS;
+
+ /* Wait for basic configuration to complete before proceeding */
+ loop = 0;
+ do {
+ reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ loop++;
+ } while ((!reg_data) && (loop < 50));
+
+ /* Clear the Init Done bit for the next init event */
+ reg_data = E1000_READ_REG(hw, STATUS);
+ reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+ E1000_WRITE_REG(hw, STATUS, reg_data);
+
+ /* Make sure HW does not configure LCD from PHY extended configuration
+ * before SW configuration */
+ reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+ reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+ cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+ cnf_size >>= 16;
+ if (cnf_size) {
+ reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+ cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+ /* cnf_base_addr is in DWORD */
+ cnf_base_addr >>= 16;
+
+ /* Configure LCD from extended configuration region. */
+ ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+ cnf_size);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 467c9ed944f8..f9341e3276b3 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -62,6 +62,7 @@ typedef enum {
e1000_82572,
e1000_82573,
e1000_80003es2lan,
+ e1000_ich8lan,
e1000_num_macs
} e1000_mac_type;
@@ -70,6 +71,7 @@ typedef enum {
e1000_eeprom_spi,
e1000_eeprom_microwire,
e1000_eeprom_flash,
+ e1000_eeprom_ich8,
e1000_eeprom_none, /* No NVM support */
e1000_num_eeprom_types
} e1000_eeprom_type;
@@ -98,6 +100,11 @@ typedef enum {
e1000_fc_default = 0xFF
} e1000_fc_type;
+struct e1000_shadow_ram {
+ uint16_t eeprom_word;
+ boolean_t modified;
+};
+
/* PCI bus types */
typedef enum {
e1000_bus_type_unknown = 0,
@@ -218,6 +225,8 @@ typedef enum {
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
e1000_phy_undefined = 0xFF
} e1000_phy_type;
@@ -313,6 +322,10 @@ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy
int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
+int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
@@ -331,6 +344,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
@@ -388,6 +402,8 @@ int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
int32_t e1000_read_mac_addr(struct e1000_hw * hw);
int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
+void e1000_release_software_flag(struct e1000_hw *hw);
+int32_t e1000_get_software_flag(struct e1000_hw *hw);
/* Filters (multicast, vlan, receive) */
void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
@@ -401,6 +417,7 @@ int32_t e1000_setup_led(struct e1000_hw *hw);
int32_t e1000_cleanup_led(struct e1000_hw *hw);
int32_t e1000_led_on(struct e1000_hw *hw);
int32_t e1000_led_off(struct e1000_hw *hw);
+int32_t e1000_blink_led_start(struct e1000_hw *hw);
/* Adaptive IFS Functions */
@@ -422,6 +439,29 @@ int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
void e1000_release_software_semaphore(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
+int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+
+int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t *data);
+int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t byte);
+int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t byte);
+int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+ uint16_t *data);
+int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+ uint32_t size, uint16_t *data);
+int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
+
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
/* PCI Device IDs */
#define E1000_DEV_ID_82542 0x1000
@@ -446,6 +486,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
#define E1000_DEV_ID_82541EI 0x1013
#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
#define E1000_DEV_ID_82541ER 0x1078
#define E1000_DEV_ID_82547GI 0x1075
#define E1000_DEV_ID_82541GI 0x1076
@@ -457,18 +498,28 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_PCIE 0x108A
#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
#define NODE_ADDRESS_SIZE 6
@@ -539,6 +590,14 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
+/* Additional interrupts need to be handled for e1000_ich8lan:
+ DSW = The FW changed the status of the DISSW bit in FWSM
+ PHYINT = The LAN connected device generates an interrupt
+ EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+ E1000_IMS_DSW | \
+ E1000_IMS_PHYINT | \
+ E1000_IMS_EPRST)
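A minimal sketch, not taken from this patch, of how the extra ich8lan sources would be unmasked alongside the driver's usual IMS_ENABLE_MASK; where the driver actually does this is outside this hunk:

	if (hw->mac_type == e1000_ich8lan)
		E1000_WRITE_REG(hw, IMS,
		                IMS_ENABLE_MASK | IMS_ICH8LAN_ENABLE_MASK);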
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor. We
@@ -546,6 +605,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
* E1000_RAR_ENTRIES - 1 multicast addresses.
*/
#define E1000_RAR_ENTRIES 15
+#define E1000_RAR_ENTRIES_ICH8LAN 7
#define MIN_NUMBER_OF_DESCRIPTORS 8
#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
@@ -767,6 +827,9 @@ struct e1000_data_desc {
#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+#define E1000_NUM_UNICAST_ICH8LAN 7
+#define E1000_MC_TBL_SIZE_ICH8LAN 32
+
/* Receive Address Register */
struct e1000_rar {
@@ -776,6 +839,7 @@ struct e1000_rar {
/* Number of entries in the Multicast Table Array (MTA). */
#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
/* IPv4 Address Table Entry */
struct e1000_ipv4_at_entry {
@@ -786,6 +850,7 @@ struct e1000_ipv4_at_entry {
/* Four wakeup IP addresses are supported */
#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN 3
#define E1000_IP6AT_SIZE 1
/* IPv6 Address Table Entry */
@@ -844,6 +909,7 @@ struct e1000_ffvt_entry {
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
@@ -872,6 +938,8 @@ struct e1000_ffvt_entry {
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG 0x0001
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -899,11 +967,13 @@ struct e1000_ffvt_entry {
#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
-#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -1050,6 +1120,7 @@ struct e1000_ffvt_entry {
#define E1000_82542_FLA E1000_FLA
#define E1000_82542_MDIC E1000_MDIC
#define E1000_82542_SCTL E1000_SCTL
+#define E1000_82542_FEXTNVM E1000_FEXTNVM
#define E1000_82542_FCAL E1000_FCAL
#define E1000_82542_FCAH E1000_FCAH
#define E1000_82542_FCT E1000_FCT
@@ -1073,6 +1144,19 @@ struct e1000_ffvt_entry {
#define E1000_82542_RDLEN0 E1000_82542_RDLEN
#define E1000_82542_RDH0 E1000_82542_RDH
#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
#define E1000_82542_RDTR1 0x00130
#define E1000_82542_RDBAL1 0x00138
#define E1000_82542_RDBAH1 0x0013C
@@ -1110,11 +1194,14 @@ struct e1000_ffvt_entry {
#define E1000_82542_FLOP E1000_FLOP
#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
#define E1000_82542_ERT E1000_ERT
#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RXDCTL1 E1000_RXDCTL1
#define E1000_82542_RADV E1000_RADV
#define E1000_82542_RSRPD E1000_RSRPD
#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_KABGTXD E1000_KABGTXD
#define E1000_82542_TDFHS E1000_TDFHS
#define E1000_82542_TDFTS E1000_TDFTS
#define E1000_82542_TDFPC E1000_TDFPC
@@ -1310,13 +1397,16 @@ struct e1000_hw_stats {
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
- uint8_t __iomem *hw_addr;
+ uint8_t *hw_addr;
uint8_t *flash_address;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
uint32_t phy_init_script;
e1000_media_type media_type;
void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ uint32_t flash_bank_size;
+ uint32_t flash_base_addr;
e1000_fc_type fc;
e1000_bus_speed bus_speed;
e1000_bus_width bus_width;
@@ -1328,6 +1418,7 @@ struct e1000_hw {
uint32_t asf_firmware_present;
uint32_t eeprom_semaphore_present;
uint32_t swfw_sync_present;
+ uint32_t swfwhw_semaphore_present;
unsigned long io_base;
uint32_t phy_id;
uint32_t phy_revision;
@@ -1387,6 +1478,7 @@ struct e1000_hw {
boolean_t in_ifs_mode;
boolean_t mng_reg_access_disabled;
boolean_t leave_av_bit_off;
+ boolean_t kmrn_lock_loss_workaround_disabled;
};
@@ -1435,6 +1527,7 @@ struct e1000_hw {
#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -1449,6 +1542,8 @@ struct e1000_hw {
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
@@ -1506,6 +1601,10 @@ struct e1000_hw {
#define E1000_STM_OPCODE 0xDB00
#define E1000_HICR_FW_RESET 0xC0
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH8_NVM_SIG_WORD 0x13
+#define E1000_ICH8_NVM_SIG_MASK 0xC0
+
/* EEPROM Read */
#define E1000_EERD_START 0x00000001 /* Start Read */
#define E1000_EERD_DONE 0x00000010 /* Read Done */
@@ -1551,7 +1650,6 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
-#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
@@ -1591,12 +1689,31 @@ struct e1000_hw {
#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
/* Half-Duplex Control */
#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN 0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+#define E1000_PHY_CTRL_B2B_EN 0x00000080
+
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
@@ -1666,6 +1783,9 @@ struct e1000_hw {
#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
/* Interrupt Cause Set */
#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1692,6 +1812,9 @@ struct e1000_hw {
#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1718,6 +1841,9 @@ struct e1000_hw {
#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Clear */
#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1744,6 +1870,9 @@ struct e1000_hw {
#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
/* Receive Control */
#define E1000_RCTL_RST 0x00000001 /* Software reset */
@@ -1918,9 +2047,10 @@ struct e1000_hw {
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
@@ -2010,6 +2140,15 @@ struct e1000_hw {
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
/* FFLT Debug Register */
#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
@@ -2082,6 +2221,8 @@ struct e1000_host_command_info {
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
@@ -2140,8 +2281,10 @@ struct e1000_host_command_info {
#define EEPROM_PHY_CLASS_WORD 0x0007
#define EEPROM_INIT_CONTROL1_REG 0x000A
#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
#define EEPROM_CFG 0x0012
#define EEPROM_FLASH_VERSION 0x0032
@@ -2153,10 +2296,16 @@ struct e1000_host_command_info {
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573 0xF746
+#define ID_LED_DEFAULT_82573 0x1811
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_OFF2 << 8) | \
+ (ID_LED_DEF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
@@ -2191,6 +2340,11 @@ struct e1000_host_command_info {
#define EEPROM_WORD0F_ASM_DIR 0x2000
#define EEPROM_WORD0F_ANE 0x0800
#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
/* Mask bits for fields in Word 0x1a of the EEPROM */
#define EEPROM_WORD1A_ASPM_MASK 0x000C
@@ -2265,23 +2419,29 @@ struct e1000_host_command_info {
#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
-#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
#define E1000_PBA_30K 0x001E
#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
#define E1000_PBA_38K 0x0026
#define E1000_PBA_40K 0x0028
#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+#define E1000_PBS_16K E1000_PBA_16K
+
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
@@ -2336,7 +2496,7 @@ struct e1000_host_command_info {
/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
#define AUTO_READ_DONE_TIMEOUT 10
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
-#define PHY_CFG_TIMEOUT 40
+#define PHY_CFG_TIMEOUT 100
#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
@@ -2764,6 +2924,17 @@ struct e1000_host_command_info {
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+
/* IGP01E1000 Specific Port Config Register - R/W */
#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
#define IGP01E1000_PSCFR_PRE_EN 0x0020
@@ -2990,6 +3161,221 @@ struct e1000_host_command_info {
#define L1LXT971A_PHY_ID 0x001378E0
#define GG82563_E_PHY_ID 0x01410CA0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+ (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+
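Worked example of the packing, using the KMRN power-management register defined just below:

	/* PHY_REG(770, 17) == (770 << PHY_PAGE_SHIFT) | (17 & MAX_PHY_REG_ADDRESS)
	 *                  == 0x6040 | 0x11
	 *                  == 0x6051   (register 17 on PHY page 770)
	 */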
+#define IGP3_PHY_PORT_CTRL \
+ PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+
+#define IGP3_CAPABILITY \
+ PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN 0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND 0x0080
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT 5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
+
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT 6
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH8_FLASH_COMMAND_TIMEOUT 500 /* 500 ms, should be adjusted */
+#define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles, should be adjusted */
+#define ICH8_FLASH_SEG_SIZE_256 256
+#define ICH8_FLASH_SEG_SIZE_4K 4096
+#define ICH8_FLASH_SEG_SIZE_64K 65536
+
+#define ICH8_CYCLE_READ 0x0
+#define ICH8_CYCLE_RESERVED 0x1
+#define ICH8_CYCLE_WRITE 0x2
+#define ICH8_CYCLE_ERASE 0x3
+
+#define ICH8_FLASH_GFPREG 0x0000
+#define ICH8_FLASH_HSFSTS 0x0004
+#define ICH8_FLASH_HSFCTL 0x0006
+#define ICH8_FLASH_FADDR 0x0008
+#define ICH8_FLASH_FDATA0 0x0010
+#define ICH8_FLASH_FRACC 0x0050
+#define ICH8_FLASH_FREG0 0x0054
+#define ICH8_FLASH_FREG1 0x0058
+#define ICH8_FLASH_FREG2 0x005C
+#define ICH8_FLASH_FREG3 0x0060
+#define ICH8_FLASH_FPR0 0x0074
+#define ICH8_FLASH_FPR1 0x0078
+#define ICH8_FLASH_SSFSTS 0x0090
+#define ICH8_FLASH_SSFCTL 0x0092
+#define ICH8_FLASH_PREOP 0x0094
+#define ICH8_FLASH_OPTYPE 0x0096
+#define ICH8_FLASH_OPMENU 0x0098
+
+#define ICH8_FLASH_REG_MAPSIZE 0x00A0
+#define ICH8_FLASH_SECTOR_SIZE 4096
+#define ICH8_GFPREG_BASE_MASK 0x1FFF
+#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+#ifdef E1000_BIG_ENDIAN
+ uint16_t reserved2 :6;
+ uint16_t fldesvalid :1;
+ uint16_t flockdn :1;
+ uint16_t flcdone :1;
+ uint16_t flcerr :1;
+ uint16_t dael :1;
+ uint16_t berasesz :2;
+ uint16_t flcinprog :1;
+ uint16_t reserved1 :2;
+#else
+ uint16_t flcdone :1; /* bit 0 Flash Cycle Done */
+ uint16_t flcerr :1; /* bit 1 Flash Cycle Error */
+ uint16_t dael :1; /* bit 2 Direct Access error Log */
+ uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */
+ uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */
+ uint16_t reserved1 :2; /* bit 7:6 Reserved */
+ uint16_t reserved2 :6; /* bit 13:8 Reserved */
+ uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+ uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */
+#endif
+ } hsf_status;
+ uint16_t regval;
+};
+
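A minimal sketch of how this overlay is typically consumed, using the E1000_READ_ICH8_REG16() helper added to e1000_osdep.h later in this diff; the real poll/retry logic lives in e1000_hw.c and is not part of this hunk:

	union ich8_hws_flash_status hsfsts;

	hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
	if (hsfsts.hsf_status.flcinprog) {
		/* a flash cycle is already in flight; callers poll here,
		 * bounded by ICH8_FLASH_COMMAND_TIMEOUT */
	}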
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+#ifdef E1000_BIG_ENDIAN
+ uint16_t fldbcount :2;
+ uint16_t flockdn :6;
+ uint16_t flcgo :1;
+ uint16_t flcycle :2;
+ uint16_t reserved :5;
+#else
+ uint16_t flcgo :1; /* 0 Flash Cycle Go */
+ uint16_t flcycle :2; /* 2:1 Flash Cycle */
+ uint16_t reserved :5; /* 7:3 Reserved */
+ uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */
+ uint16_t flockdn :6; /* 15:10 Reserved */
+#endif
+ } hsf_ctrl;
+ uint16_t regval;
+};
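The matching sketch for starting a hardware-sequenced read cycle through this control overlay. The fldbcount encoding (transfer size minus one) is an assumption about the flash controller, not something stated in this header:

	union ich8_hws_flash_ctrl hsflctl;

	hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.fldbcount = 2 - 1;              /* assumed: bytes - 1 */
	hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
	hsflctl.hsf_ctrl.flcgo = 1;                      /* kick off the cycle */
	E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);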
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+#ifdef E1000_BIG_ENDIAN
+ uint32_t gmwag :8;
+ uint32_t gmrag :8;
+ uint32_t grwa :8;
+ uint32_t grra :8;
+#else
+ uint32_t grra :8; /* 0:7 GbE region Read Access */
+ uint32_t grwa :8; /* 8:15 GbE region Write Access */
+ uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */
+ uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */
+#endif
+ } hsf_flregacc;
+ uint16_t regval;
+ uint32_t regval;
+
/* Miscellaneous PHY bit definitions. */
#define PHY_PREAMBLE 0xFFFFFFFF
#define PHY_SOF 0x01
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f77624f5f17b..da62db897426 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x1026),
INTEL_E1000_ETHERNET_DEVICE(0x1027),
INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1049),
+ INTEL_E1000_ETHERNET_DEVICE(0x104A),
+ INTEL_E1000_ETHERNET_DEVICE(0x104B),
+ INTEL_E1000_ETHERNET_DEVICE(0x104C),
+ INTEL_E1000_ETHERNET_DEVICE(0x104D),
INTEL_E1000_ETHERNET_DEVICE(0x105E),
INTEL_E1000_ETHERNET_DEVICE(0x105F),
INTEL_E1000_ETHERNET_DEVICE(0x1060),
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x109A),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BB),
/* required last entry */
{0,}
};
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
-static void e1000_watchdog_task(struct e1000_adapter *adapter);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = {
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
/* Power Managment Hooks */
-#ifdef CONFIG_PM
.suspend = e1000_suspend,
+#ifdef CONFIG_PM
.resume = e1000_resume,
#endif
.shutdown = e1000_shutdown,
@@ -261,6 +267,44 @@ e1000_exit_module(void)
module_exit(e1000_exit_module);
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int flags, err = 0;
+
+ flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
+ if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ adapter->have_msi = TRUE;
+ if ((err = pci_enable_msi(adapter->pdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = FALSE;
+ }
+ }
+ if (adapter->have_msi)
+ flags &= ~IRQF_SHARED;
+#endif
+ if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
+ netdev->name, netdev)))
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+#endif
+}
+
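These two helpers are wired up further down in this diff: e1000_open() now requests the vector (and MSI where available) before bringing the interface up, and e1000_close() releases it after the interface is down. Roughly:

	/* open path (see the e1000_open hunk below) */
	err = e1000_request_irq(adapter);
	if (err)
		goto err_up;
	e1000_power_up_phy(adapter);
	err = e1000_up(adapter);

	/* close path (see the e1000_close hunk below) */
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);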
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
+ uint32_t extcnf;
/* Let firmware taken over control of h/w */
switch (adapter->hw.mac_type) {
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
+ case e1000_ich8lan:
+ extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
default:
break;
}
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
+ uint32_t extcnf;
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
case e1000_82571:
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm | E1000_SWSM_DRV_LOAD);
break;
+ case e1000_ich8lan:
+ extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+ E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+ extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+ break;
default:
break;
}
@@ -387,18 +443,10 @@ int
e1000_up(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int i, err;
+ int i;
/* hardware has been reset, we need to reload some things */
- /* Reset the PHY if it was previously powered down */
- if (adapter->hw.media_type == e1000_media_type_copper) {
- uint16_t mii_reg;
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
- if (mii_reg & MII_CR_POWER_DOWN)
- e1000_phy_hw_reset(&adapter->hw);
- }
-
e1000_set_multi(netdev);
e1000_restore_vlan(adapter);
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter)
E1000_DESC_UNUSED(ring));
}
-#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->have_msi = TRUE;
- if ((err = pci_enable_msi(adapter->pdev))) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- adapter->have_msi = FALSE;
- }
- }
-#endif
- if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- netdev->name, netdev))) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
- return err;
- }
-
adapter->tx_queue_len = netdev->tx_queue_len;
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter)
return 0;
}
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+ uint16_t mii_reg = 0;
+
+ /* Just clear the power down bit to wake the phy back up */
+ if (adapter->hw.media_type == e1000_media_type_copper) {
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+ e1000_check_mng_mode(&adapter->hw);
+ /* Power down the PHY so no link is implied when interface is down
+ * The PHY cannot be powered down if any of the following is TRUE
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active */
+ if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
+ adapter->hw.media_type == e1000_media_type_copper &&
+ !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+ !mng_mode_enabled &&
+ !e1000_check_phy_reset_block(&adapter->hw)) {
+ uint16_t mii_reg = 0;
+ e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+ mdelay(1);
+ }
+}
+
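Both helpers are called from the e1000_open()/e1000_close() hunks later in this diff. As an aside, not part of the patch:

	/* MII_CR_POWER_DOWN is bit 11 (0x0800) of MII register 0, the
	 * standard IEEE control register: clearing it wakes the PHY,
	 * setting it powers the PHY down while the remaining settings
	 * are retained across the power-down/up cycle. */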
void
e1000_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
- e1000_check_mng_mode(&adapter->hw);
e1000_irq_disable(adapter);
- free_irq(adapter->pdev->irq, netdev);
-#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2 &&
- adapter->have_msi == TRUE)
- pci_disable_msi(adapter->pdev);
-#endif
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter)
e1000_reset(adapter);
e1000_clean_all_tx_rings(adapter);
e1000_clean_all_rx_rings(adapter);
+}
- /* Power down the PHY so no link is implied when interface is down *
- * The PHY cannot be powered down if any of the following is TRUE *
- * (a) WoL is enabled
- * (b) AMT is active
- * (c) SoL/IDER session is active */
- if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
- adapter->hw.media_type == e1000_media_type_copper &&
- !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
- !mng_mode_enabled &&
- !e1000_check_phy_reset_block(&adapter->hw)) {
- uint16_t mii_reg;
- e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
- mdelay(1);
- }
+void
+e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
}
void
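Callers elsewhere in this patch (e1000_reset_task(), e1000_change_mtu(), the MII ioctl paths) replace their open-coded down/up pairs with this helper, so concurrent resets serialize on the __E1000_RESETTING bit. The resulting call-site pattern is:

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);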
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter)
case e1000_82573:
pba = E1000_PBA_12K;
break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ break;
default:
pba = E1000_PBA_48K;
break;
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter)
/* Set the FC high water mark to 90% of the FIFO size.
* Required to clear last 3 LSB */
fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+ /* We can't use 90% on small FIFOs because the remainder
+ * would be less than 1 full frame. In this case, we size
+ * it to allow at least a full frame above the high water
+ * mark. */
+ if (pba < E1000_PBA_16K)
+ fc_high_water_mark = (pba * 1024) - 1600;
adapter->hw.fc_high_water = fc_high_water_mark;
adapter->hw.fc_low_water = fc_high_water_mark - 8;
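Worked example for the smallest packet buffer the driver now supports (ich8lan, pba = E1000_PBA_8K = 8, i.e. 8 KB):

	/* old 90% rule: ((8 * 9216) / 10) & 0xFFF8 = 7368
	 *               -> only 8192 - 7368 = 824 bytes of headroom
	 * new rule:     (8 * 1024) - 1600 = 6592
	 *               -> 1600 bytes above the mark, room for a full frame
	 */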
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter)
e1000_reset_adaptive(&adapter->hw);
e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+
+ if (!adapter->smart_power_down &&
+ (adapter->hw.mac_type == e1000_82571 ||
+ adapter->hw.mac_type == e1000_82572)) {
+ uint16_t phy_data = 0;
+ /* speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed */
+ e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ }
+
+ if (adapter->hw.mac_type < e1000_ich8lan)
+ /* FIXME: this code is duplicate and wrong for PCI Express */
if (adapter->en_mng_pt) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len;
+ unsigned long flash_start, flash_len;
static int cards_found = 0;
static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev,
if ((err = pci_enable_device(pdev)))
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+ !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+ (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n");
return err;
}
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev,
if ((err = e1000_sw_init(adapter)))
goto err_sw_init;
+ /* Flash BAR mapping must happen after e1000_sw_init
+ * because it depends on mac_type */
+ if ((adapter->hw.mac_type == e1000_ich8lan) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ flash_start = pci_resource_start(pdev, 1);
+ flash_len = pci_resource_len(pdev, 1);
+ adapter->hw.flash_address = ioremap(flash_start, flash_len);
+ if (!adapter->hw.flash_address) {
+ err = -EIO;
+ goto err_flashmap;
+ }
+ }
+
if ((err = e1000_check_phy_reset_block(&adapter->hw)))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ if (adapter->hw.mac_type == e1000_ich8lan)
+ netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
}
#ifdef NETIF_F_TSO
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /* hard_start_xmit is safe against parallel locking */
netdev->features |= NETIF_F_LLTX;
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+ /* initialize eeprom parameters */
+
+ if (e1000_init_eeprom_params(&adapter->hw)) {
+ E1000_ERR("EEPROM initialization failed\n");
+ return -EIO;
+ }
+
/* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev,
adapter->watchdog_timer.function = &e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
- INIT_WORK(&adapter->watchdog_task,
- (void (*)(void *))e1000_watchdog_task, adapter);
-
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = &e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev,
EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_82544_APM;
break;
+ case e1000_ich8lan:
+ e1000_read_eeprom(&adapter->hw,
+ EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+ break;
case e1000_82546:
case e1000_82546_rev_3:
case e1000_82571:
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev,
return 0;
err_register:
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+err_flashmap:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev)
flush_scheduled_work();
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev)
#endif
iounmap(adapter->hw.hw_addr);
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
pci_release_regions(pdev);
free_netdev(netdev);
@@ -947,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
- adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
hw->max_frame_size = netdev->mtu +
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
return -EIO;
}
- /* initialize eeprom parameters */
-
- if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
- return -EIO;
- }
-
switch (hw->mac_type) {
default:
break;
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
int err;
+ /* disallow open during test */
+ if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
+ return -EBUSY;
+
/* allocate transmit descriptors */
if ((err = e1000_setup_all_tx_resources(adapter)))
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev)
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_up;
+
+ e1000_power_up_phy(adapter);
+
if ((err = e1000_up(adapter)))
goto err_up;
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
e1000_free_all_tx_resources(adapter);
e1000_free_all_rx_resources(adapter);
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
-
- txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+ txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the transmit descriptor ring\n");
@@ -1302,11 +1428,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
tdba = adapter->tx_ring[0].dma;
tdlen = adapter->tx_ring[0].count *
sizeof(struct e1000_tx_desc);
- E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
E1000_WRITE_REG(hw, TDLEN, tdlen);
- E1000_WRITE_REG(hw, TDH, 0);
+ E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+ E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, TDT, 0);
+ E1000_WRITE_REG(hw, TDH, 0);
adapter->tx_ring[0].tdh = E1000_TDH;
adapter->tx_ring[0].tdt = E1000_TDT;
break;
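The same reordering (base-address high dword before low, tail before head) is applied to the RX ring registers in the e1000_configure_rx() hunk below. The patch does not spell out the hardware rationale; the summary below is an editorial assumption, not something this diff documents:

	/* assumed programming order:
	 *   write TDBAH, then TDBAL - high half in place before the
	 *                             low-half write commits the base
	 *   write TDT, then TDH     - tail programmed before head
	 */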
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+ rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the receive descriptor ring\n");
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
- if (adapter->hw.mac_type > e1000_82543)
- rctl |= E1000_RCTL_SECRC;
-
if (adapter->hw.tbi_compatibility_on == 1)
rctl |= E1000_RCTL_SBP;
else
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rfctl |= E1000_RFCTL_IPV6_DIS;
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
- rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
+ rctl |= E1000_RCTL_DTYP_PS;
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1712,11 +1835,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
case 1:
default:
rdba = adapter->rx_ring[0].dma;
- E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
E1000_WRITE_REG(hw, RDLEN, rdlen);
- E1000_WRITE_REG(hw, RDH, 0);
+ E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+ E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, RDT, 0);
+ E1000_WRITE_REG(hw, RDH, 0);
adapter->rx_ring[0].rdh = E1000_RDH;
adapter->rx_ring[0].rdt = E1000_RDT;
break;
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
- if (hw->mac_type == e1000_82573)
- E1000_WRITE_REG(hw, ERT, 0x0100);
-
/* Enable Receives */
E1000_WRITE_REG(hw, RCTL, rctl);
}
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev)
uint32_t rctl;
uint32_t hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
+ int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+ E1000_NUM_MTA_REGISTERS_ICH8LAN :
+ E1000_NUM_MTA_REGISTERS;
+
+ if (adapter->hw.mac_type == e1000_ich8lan)
+ rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
/* reserve RAR[14] for LAA over-write work-around */
if (adapter->hw.mac_type == e1000_82571)
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev)
mc_ptr = mc_ptr->next;
} else {
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH(hw);
}
}
/* clear the old settings from the multicast hash table */
- for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+ for (i = 0; i < mta_reg_count; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
/* load any remaining addresses into the hash table */
@@ -2201,19 +2331,19 @@ static void
e1000_watchdog(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-}
-
-static void
-e1000_watchdog_task(struct e1000_adapter *adapter)
-{
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
uint32_t link, tctl;
-
- e1000_check_for_link(&adapter->hw);
+ int32_t ret_val;
+
+ ret_val = e1000_check_for_link(&adapter->hw);
+ if ((ret_val == E1000_ERR_PHY) &&
+ (adapter->hw.phy_type == e1000_phy_igp_3) &&
+ (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kumeran_lock_loss_workaround() */
+ DPRINTK(LINK, INFO,
+ "Gigabit has been disabled, downgrading speed\n");
+ }
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size) {
+ !skb_is_gso(skb)) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
case e1000_82571:
case e1000_82572:
case e1000_82573:
+ case e1000_ich8lan:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR
+ DPRINTK(DRV, ERR,
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
- if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size)
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
count++;
#endif
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_down(adapter);
- e1000_up(adapter);
+ e1000_reinit_locked(adapter);
}
/**
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (adapter->hw.mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
+ case e1000_ich8lan:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
@@ -3018,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
/* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
if (!adapter->hw.tbi_compatibility_on &&
((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3026,10 +3155,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- if (netif_running(netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- }
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
adapter->hw.max_frame_size = max_frame;
@@ -3074,12 +3201,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
adapter->stats.roc += E1000_READ_REG(hw, ROC);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+ }
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
adapter->stats.mpc += E1000_READ_REG(hw, MPC);
@@ -3107,12 +3237,16 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.totl += E1000_READ_REG(hw, TOTL);
adapter->stats.toth += E1000_READ_REG(hw, TOTH);
adapter->stats.tpr += E1000_READ_REG(hw, TPR);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ }
+
adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
@@ -3134,6 +3268,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
if (hw->mac_type > e1000_82547_rev_2) {
adapter->stats.iac += E1000_READ_REG(hw, IAC);
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
@@ -3141,6 +3277,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ }
}
/* Fill out the OS statistics structure */
@@ -3249,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
- if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
- __netif_rx_schedule(&adapter->polling_netdev[0]);
+ if (likely(netif_rx_schedule_prep(netdev)))
+ __netif_rx_schedule(netdev);
else
e1000_irq_enable(adapter);
#else
@@ -3293,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
{
struct e1000_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota);
- int tx_cleaned = 0, i = 0, work_done = 0;
+ int tx_cleaned = 0, work_done = 0;
/* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv;
/* Keep link state information with original netdev */
- if (!netif_carrier_ok(adapter->netdev))
+ if (!netif_carrier_ok(poll_dev))
goto quit_polling;
- while (poll_dev != &adapter->polling_netdev[i]) {
- i++;
- BUG_ON(i == adapter->num_rx_queues);
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
}
- if (likely(adapter->num_tx_queues == 1)) {
- /* e1000_clean is called per-cpu. This lock protects
- * tx_ring[0] from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- spin_unlock(&adapter->tx_queue_lock);
- }
- } else
- tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
- adapter->clean_rx(adapter, &adapter->rx_ring[i],
+ adapter->clean_rx(adapter, &adapter->rx_ring[0],
&work_done, work_to_do);
*budget -= work_done;
@@ -3328,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
/* If no Tx and not enough Rx work done, exit the polling mode */
if ((!tx_cleaned && (work_done == 0)) ||
- !netif_running(adapter->netdev)) {
+ !netif_running(poll_dev)) {
quit_polling:
netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
@@ -3543,11 +3672,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
+ /* adjust length to remove Ethernet CRC */
+ length -= 4;
+
if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
- dev_kfree_skb_irq(skb);
+ /* recycle */
+ buffer_info-> skb = skb;
goto next_desc;
}
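This pairs with the e1000_setup_rctl() changes above that stop setting E1000_RCTL_SECRC: the hardware no longer strips the Ethernet FCS, so the driver now subtracts the 4 CRC bytes itself before handing data up. For example:

	/* a minimum-size frame arrives as 64 bytes on the wire:
	 * 60 bytes of header + payload (+ padding) plus the 4-byte FCS;
	 * rx_desc->length = 64, so length - 4 = 60 bytes reach the stack. */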
@@ -3675,7 +3808,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
- buffer_info = &rx_ring->buffer_info[i];
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
@@ -3747,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
pci_dma_sync_single_for_device(pdev,
ps_page_dma->ps_page_dma[0],
PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ /* remove the CRC */
+ l1 -= 4;
skb_put(skb, l1);
- length += l1;
goto copydone;
} /* if */
}
@@ -3767,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb->truesize += length;
}
+ /* strip the ethernet crc, problem is we're using pages now so
+ * this whole operation can get a little cpu intensive */
+ pskb_trim(skb, skb->len - 4);
+
copydone:
e1000_rx_checksum(adapter, staterr,
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
@@ -4180,10 +4317,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
}
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
break;
case M88E1000_PHY_SPEC_CTRL:
@@ -4200,10 +4336,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
break;
- if (netif_running(adapter->netdev)) {
- e1000_down(adapter);
- e1000_up(adapter);
- } else
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
e1000_reset(adapter);
break;
}
@@ -4277,18 +4412,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
/* enable VLAN receive filtering */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_CFIEN;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
e1000_update_mng_vlan(adapter);
+ }
} else {
/* disable VLAN tag insert/strip */
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl &= ~E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ if (adapter->hw.mac_type != e1000_ich8lan) {
/* disable VLAN filtering */
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_VFE;
@@ -4297,6 +4435,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
+ }
}
e1000_irq_enable(adapter);
@@ -4458,12 +4597,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
+#ifdef CONFIG_PM
int retval = 0;
+#endif
netif_device_detach(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
+ }
#ifdef CONFIG_PM
/* Implement our own version of pci_save_state(pdev) because pci-
@@ -4521,7 +4664,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 0);
}
+ /* FIXME: this code is incorrect for PCI Express */
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
@@ -4532,6 +4677,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
}
}
+ if (adapter->hw.phy_type == e1000_phy_igp_3)
+ e1000_phy_powerdown_workaround(&adapter->hw);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);
@@ -4567,7 +4715,9 @@ e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
+ /* FIXME: this code is incorrect for PCI Express */
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type != e1000_ich8lan &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
@@ -4601,6 +4751,7 @@ static void
e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
e1000_clean_tx_irq(adapter, adapter->tx_ring);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 048d052be29d..2d3e8b06cab0 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -127,4 +127,17 @@ typedef enum {
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
+#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG(a, reg) ( \
+ readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG16(a, reg) ( \
+ readw((a)->flash_address + reg))
+
+
#endif /* _E1000_OSDEP_H_ */
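For readers unfamiliar with the new macros above: they are thin wrappers around the usual MMIO accessors, aimed at the separately mapped ICH8 flash window rather than the main register BAR. Roughly (sketch only, assuming flash_address was obtained with ioremap()):

#include <asm/io.h>
#include <linux/types.h>

/* Rough expansion of E1000_WRITE_ICH8_REG / E1000_READ_ICH8_REG. */
static void sketch_ich8_write32(void __iomem *flash_address,
				unsigned long reg, u32 value)
{
	writel(value, flash_address + reg);
}

static u32 sketch_ich8_read32(void __iomem *flash_address, unsigned long reg)
{
	return readl(flash_address + reg);
}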
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e55f8969a0fb..0ef413172c68 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -45,6 +45,16 @@
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static int num_##X = 0; \
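The comment block added above refers to GCC's range syntax in designated initializers; a tiny stand-alone example of what `[0 ... E1000_MAX_NIC] = OPTION_UNSET` does (the values here are made up):

#include <stdio.h>

#define MAX_NIC 3
#define OPTION_UNSET -1

/* Every element from index 0 through MAX_NIC is set to OPTION_UNSET,
 * so the driver can later tell "no module parameter given" (-1) apart
 * from any value the user actually passed. */
static int TxDescriptors[MAX_NIC + 1] = { [0 ... MAX_NIC] = OPTION_UNSET };

int main(void)
{
	int i;

	for (i = 0; i <= MAX_NIC; i++)
		printf("TxDescriptors[%d] = %d\n", i, TxDescriptors[i]);
	return 0;
}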
@@ -183,6 +193,24 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
@@ -296,6 +324,7 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd);
DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ bd = E1000_MAX_NIC;
}
{ /* Transmit Descriptor Count */
@@ -313,14 +342,9 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD;
- if (num_TxDescriptors > bd) {
- tx_ring->count = TxDescriptors[bd];
- e1000_validate_option(&tx_ring->count, &opt, adapter);
- E1000_ROUNDUP(tx_ring->count,
- REQ_TX_DESCRIPTOR_MULTIPLE);
- } else {
- tx_ring->count = opt.def;
- }
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
@@ -339,14 +363,9 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
- if (num_RxDescriptors > bd) {
- rx_ring->count = RxDescriptors[bd];
- e1000_validate_option(&rx_ring->count, &opt, adapter);
- E1000_ROUNDUP(rx_ring->count,
- REQ_RX_DESCRIPTOR_MULTIPLE);
- } else {
- rx_ring->count = opt.def;
- }
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
@@ -358,13 +377,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED
};
- if (num_XsumRX > bd) {
- int rx_csum = XsumRX[bd];
- e1000_validate_option(&rx_csum, &opt, adapter);
- adapter->rx_csum = rx_csum;
- } else {
- adapter->rx_csum = opt.def;
- }
+ int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
}
{ /* Flow Control */
@@ -384,13 +399,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.p = fc_list }}
};
- if (num_FlowControl > bd) {
- int fc = FlowControl[bd];
- e1000_validate_option(&fc, &opt, adapter);
- adapter->hw.fc = adapter->hw.original_fc = fc;
- } else {
- adapter->hw.fc = adapter->hw.original_fc = opt.def;
- }
+ int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
}
{ /* Transmit Interrupt Delay */
struct e1000_option opt = {
@@ -402,13 +413,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXDELAY }}
};
- if (num_TxIntDelay > bd) {
- adapter->tx_int_delay = TxIntDelay[bd];
- e1000_validate_option(&adapter->tx_int_delay, &opt,
- adapter);
- } else {
- adapter->tx_int_delay = opt.def;
- }
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
}
{ /* Transmit Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -420,13 +426,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXABSDELAY }}
};
- if (num_TxAbsIntDelay > bd) {
- adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
- e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
- } else {
- adapter->tx_abs_int_delay = opt.def;
- }
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
}
{ /* Receive Interrupt Delay */
struct e1000_option opt = {
@@ -438,13 +440,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY }}
};
- if (num_RxIntDelay > bd) {
- adapter->rx_int_delay = RxIntDelay[bd];
- e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
- } else {
- adapter->rx_int_delay = opt.def;
- }
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
}
{ /* Receive Absolute Interrupt Delay */
struct e1000_option opt = {
@@ -456,13 +453,9 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY }}
};
- if (num_RxAbsIntDelay > bd) {
- adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
- e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
- } else {
- adapter->rx_abs_int_delay = opt.def;
- }
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
}
{ /* Interrupt Throttling Rate */
struct e1000_option opt = {
@@ -474,26 +467,44 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_ITR }}
};
- if (num_InterruptThrottleRate > bd) {
- adapter->itr = InterruptThrottleRate[bd];
- switch (adapter->itr) {
- case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
- break;
- case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
- break;
- default:
- e1000_validate_option(&adapter->itr, &opt,
- adapter);
- break;
- }
- } else {
- adapter->itr = opt.def;
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt, adapter);
+ break;
}
}
+ { /* Smart Power Down */
+ struct e1000_option opt = {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ }
+ { /* Kumeran Lock Loss Workaround */
+ struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ int kmrn_lock_loss = KumeranLockLoss[bd];
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+ }
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
@@ -519,17 +530,18 @@ static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
- if (num_Speed > bd) {
+ bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+ if ((Speed[bd] != OPTION_UNSET)) {
DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
"parameter ignored\n");
}
- if (num_Duplex > bd) {
+ if ((Duplex[bd] != OPTION_UNSET)) {
DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
"parameter ignored\n");
}
- if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
"not valid for fiber adapters, "
"parameter ignored\n");
@@ -548,6 +560,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
{
int speed, dplx, an;
int bd = adapter->bd_number;
+ bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
{ /* Speed */
struct e1000_opt_list speed_list[] = {{ 0, "" },
@@ -564,12 +577,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = speed_list }}
};
- if (num_Speed > bd) {
- speed = Speed[bd];
- e1000_validate_option(&speed, &opt, adapter);
- } else {
- speed = opt.def;
- }
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
}
{ /* Duplex */
struct e1000_opt_list dplx_list[] = {{ 0, "" },
@@ -591,15 +600,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
"Speed/Duplex/AutoNeg parameter ignored.\n");
return;
}
- if (num_Duplex > bd) {
- dplx = Duplex[bd];
- e1000_validate_option(&dplx, &opt, adapter);
- } else {
- dplx = opt.def;
- }
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
}
- if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
DPRINTK(PROBE, INFO,
"AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
@@ -648,19 +653,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = an_list }}
};
- if (num_AutoNeg > bd) {
- an = AutoNeg[bd];
- e1000_validate_option(&an, &opt, adapter);
- } else {
- an = opt.def;
- }
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
- if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+ if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
DPRINTK(PROBE, INFO,
"Speed and duplex autonegotiation enabled\n");
break;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 037d870712ff..11b8f1b43dd5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -240,10 +240,12 @@ enum {
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
- NvRegUnknownSetupReg1 = 0xA0,
-#define NVREG_UNKSETUP1_VAL 0x16070f
- NvRegUnknownSetupReg2 = 0xA4,
-#define NVREG_UNKSETUP2_VAL 0x16
+ NvRegTxDeferral = 0xA0,
+#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
+#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
+#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
+ NvRegRxDeferral = 0xA4,
+#define NVREG_RX_DEFERRAL_DEFAULT 0x16
NvRegMacAddrA = 0xA8,
NvRegMacAddrB = 0xAC,
NvRegMulticastAddrA = 0xB0,
@@ -269,8 +271,10 @@ enum {
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
- NvRegUnknownSetupReg3 = 0x13c,
-#define NVREG_UNKSETUP3_VAL1 0x200010
+ NvRegTxWatermark = 0x13c,
+#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
+#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
+#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
@@ -658,7 +662,7 @@ static const struct register_test nv_registers_test[] = {
{ NvRegMisc1, 0x03c },
{ NvRegOffloadConfig, 0x03ff },
{ NvRegMulticastAddrA, 0xffffffff },
- { NvRegUnknownSetupReg3, 0x0ff },
+ { NvRegTxWatermark, 0x0ff },
{ NvRegWakeUpFlags, 0x07777 },
{ 0,0 }
};
@@ -1495,7 +1499,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
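Several drivers in this merge (forcedeth here, and ixgb, loopback, myri10ge and sky2 below) switch from open-coding the gso_size test to the skb_is_gso() helper; the helper is, in effect, nothing more than the sketch below:

#include <linux/skbuff.h>

/* Equivalent of skb_is_gso() on kernels that provide it: true when the
 * skb carries GSO metadata, i.e. gso_size is non-zero. */
static inline int sketch_skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size != 0;
}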
@@ -2127,7 +2131,7 @@ static int nv_update_linkspeed(struct net_device *dev)
int newdup = np->duplex;
int mii_status;
int retval = 0;
- u32 control_1000, status_1000, phyreg, pause_flags;
+ u32 control_1000, status_1000, phyreg, pause_flags, txreg;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -2245,6 +2249,26 @@ set_speed:
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
@@ -3910,7 +3934,10 @@ static int nv_open(struct net_device *dev)
/* 5) continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
- writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
+ if (np->desc_ver == DESC_VER_1)
+ writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
+ else
+ writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
writel(np->vlanctl_bits, base + NvRegVlanControl);
pci_push(base);
@@ -3932,8 +3959,8 @@ static int nv_open(struct net_device *dev)
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&i, sizeof(i));
writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
- writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
- writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
+ writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
+ writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
if (poll_interval == -1) {
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0641f54fc638..889f338132fa 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -122,6 +122,12 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
+/*
+ * bpqether network devices are paired with ethernet devices below them, so
+ * form a special "super class" of normal ethernet devices; split their locks
+ * off into a separate class since they always nest.
+ */
+static struct lock_class_key bpq_netdev_xmit_lock_key;
/* ------------------------------------------------------------------------ */
@@ -528,6 +534,7 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
+ lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
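The bpqether change above is a lockdep annotation rather than a functional change: a bpq device transmits by calling into the Ethernet device stacked beneath it, so both _xmit_lock instances can legitimately be held at once, and giving the upper device its own lock class tells lockdep that this nesting is expected. A hedged sketch of the idiom, assuming the 2.6.18-era net_device layout with a per-device _xmit_lock (names are illustrative):

#include <linux/netdevice.h>
#include <linux/lockdep.h>

static struct lock_class_key sketch_upper_xmit_lock_key;

/* Called after register_netdevice() on the stacked (upper) device so
 * lockdep no longer treats upper->_xmit_lock and lower->_xmit_lock as
 * the same class when both are taken on the transmit path. */
static void sketch_set_upper_lock_class(struct net_device *upper)
{
	lockdep_set_class(&upper->_xmit_lock, &sketch_upper_xmit_lock_key);
}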
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 3a42afab5036..43e3f33ed5e2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
if (err) {
+ i--;
while (--i >= 0)
ifb_free_one(i);
}
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index bf1fca5a3fa0..e3c8cd5eca67 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void)
{
ali_chip_t *chip;
chipio_t info;
- int ret = -ENODEV;
+ int ret;
int cfg, cfg_base;
int reg, revision;
int i = 0;
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void)
return ret;
}
+ ret = -ENODEV;
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index a4674044bd6f..2eff45bedc7c 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
#define PCIID_VENDOR_ALI 0x10b9
-static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitdata = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
{
.vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index b91e082483f6..7bbd447289b5 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint16_t ipcse, tucse, mss;
int err;
- if(likely(skb_shinfo(skb)->gso_size)) {
+ if (likely(skb_is_gso(skb))) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -1281,7 +1281,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
buffer_info->dma =
pci_map_single(adapter->pdev,
@@ -1306,7 +1306,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size;
buffer_info->dma =
pci_map_page(adapter->pdev,
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 43fef7de8cb9..997cbce9af6e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
#ifdef LOOPBACK_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f4c8fd373b9b..c3e52c806b13 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
return -ENXIO;
}
dev_info(&mgp->pdev->dev, "handoff confirmed\n");
- myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+ myri10ge_dummy_rdma(mgp, 1);
return 0;
}
@@ -2116,7 +2116,7 @@ abort_linearize:
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);
@@ -2412,14 +2412,20 @@ static int myri10ge_resume(struct pci_dev *pdev)
return -EIO;
}
myri10ge_restore_state(mgp);
- pci_enable_device(pdev);
+
+ status = pci_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return -EIO;
+ }
+
pci_set_master(pdev);
status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
netdev->name, mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed to allocate IRQ\n");
- goto abort_with_msi;
+ goto abort_with_enabled;
}
myri10ge_reset(mgp);
@@ -2438,7 +2444,8 @@ static int myri10ge_resume(struct pci_dev *pdev)
return 0;
-abort_with_msi:
+abort_with_enabled:
+ pci_disable_device(pdev);
return -EIO;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c6b77acb35ef..e1fe3a0a7b0b 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1976,7 +1976,6 @@ static int start_nic(struct s2io_nic *nic)
XENA_dev_config_t __iomem *bar0 = nic->bar0;
struct net_device *dev = nic->dev;
register u64 val64 = 0;
- u16 interruptible;
u16 subid, i;
mac_info_t *mac_control;
struct config_param *config;
@@ -2047,16 +2046,6 @@ static int start_nic(struct s2io_nic *nic)
return FAILURE;
}
- /* Enable select interrupts */
- if (nic->intr_type != INTA)
- en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
- else {
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR | RX_PIC_INTR;
- interruptible |= TX_MAC_INTR | RX_MAC_INTR;
- en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
- }
-
/*
* With some switches, link might be already up at this point.
* Because of this weird behavior, when we enable laser,
@@ -3749,101 +3738,19 @@ static int s2io_open(struct net_device *dev)
if (err) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
- if (err == -ENODEV)
- goto hw_init_failed;
- else
- goto hw_enable_failed;
- }
-
- /* Store the values of the MSIX table in the nic_t structure */
- store_xmsi_data(sp);
-
- /* After proper initialization of H/W, register ISR */
- if (sp->intr_type == MSI) {
- err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
- IRQF_SHARED, sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: MSI registration \
-failed\n", dev->name);
- goto isr_registration_failed;
- }
- }
- if (sp->intr_type == MSI_X) {
- int i;
-
- for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
- if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
- sprintf(sp->desc1, "%s:MSI-X-%d-TX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle, 0, sp->desc1,
- sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
- (unsigned long long)sp->msix_info[i].addr);
- } else {
- sprintf(sp->desc2, "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle, 0, sp->desc2,
- sp->s2io_entries[i].arg);
- DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
- (unsigned long long)sp->msix_info[i].addr);
- }
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
-failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
- goto isr_registration_failed;
- }
- sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
- }
- }
- if (sp->intr_type == INTA) {
- err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
- sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
- dev->name);
- goto isr_registration_failed;
- }
+ goto hw_init_failed;
}
if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
+ s2io_card_down(sp);
err = -ENODEV;
- goto setting_mac_address_failed;
+ goto hw_init_failed;
}
netif_start_queue(dev);
return 0;
-setting_mac_address_failed:
- if (sp->intr_type != MSI_X)
- free_irq(sp->pdev->irq, dev);
-isr_registration_failed:
- del_timer_sync(&sp->alarm_timer);
- if (sp->intr_type == MSI_X) {
- int i;
- u16 msi_control; /* Temp variable */
-
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
-
- free_irq(vector, arg);
- }
- pci_disable_msix(sp->pdev);
-
- /* Temp */
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
- }
- else if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
-hw_enable_failed:
- s2io_reset(sp);
hw_init_failed:
if (sp->intr_type == MSI_X) {
if (sp->entries)
@@ -3874,7 +3781,7 @@ static int s2io_close(struct net_device *dev)
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
- s2io_card_down(sp, 1);
+ s2io_card_down(sp);
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
@@ -5919,7 +5826,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- s2io_card_down(sp, 0);
+ s2io_card_down(sp);
netif_stop_queue(dev);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -6216,43 +6123,106 @@ static int rxd_owner_bit_reset(nic_t *sp)
}
-static void s2io_card_down(nic_t * sp, int flag)
+static int s2io_add_isr(nic_t * sp)
{
- int cnt = 0;
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
- unsigned long flags;
- register u64 val64 = 0;
+ int ret = 0;
struct net_device *dev = sp->dev;
+ int err = 0;
- del_timer_sync(&sp->alarm_timer);
- /* If s2io_set_link task is executing, wait till it completes. */
- while (test_and_set_bit(0, &(sp->link_state))) {
- msleep(50);
+ if (sp->intr_type == MSI)
+ ret = s2io_enable_msi(sp);
+ else if (sp->intr_type == MSI_X)
+ ret = s2io_enable_msi_x(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+ sp->intr_type = INTA;
}
- atomic_set(&sp->card_state, CARD_DOWN);
- /* disable Tx and Rx traffic on the NIC */
- stop_nic(sp);
- if (flag) {
- if (sp->intr_type == MSI_X) {
- int i;
- u16 msi_control;
+ /* Store the values of the MSIX table in the nic_t structure */
+ store_xmsi_data(sp);
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ /* After proper initialization of H/W, register ISR */
+ if (sp->intr_type == MSI) {
+ err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
+ IRQF_SHARED, sp->name, dev);
+ if (err) {
+ pci_disable_msi(sp->pdev);
+ DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ if (sp->intr_type == MSI_X) {
+ int i;
- free_irq(vector, arg);
+ for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
+ if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
+ } else {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0, sp->desc[i],
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
+ (unsigned long long)sp->msix_info[i].addr);
}
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
- pci_disable_msix(sp->pdev);
- } else {
- free_irq(sp->pdev->irq, dev);
- if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
+ if (err) {
+ DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+ DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
+ return -1;
+ }
+ sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
+ }
+ }
+ if (sp->intr_type == INTA) {
+ err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
+ sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ return 0;
+}
+static void s2io_rem_isr(nic_t * sp)
+{
+ int cnt = 0;
+ struct net_device *dev = sp->dev;
+
+ if (sp->intr_type == MSI_X) {
+ int i;
+ u16 msi_control;
+
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+ } else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI) {
+ u16 val;
+
+ pci_disable_msi(sp->pdev);
+ pci_read_config_word(sp->pdev, 0x4c, &val);
+ val ^= 0x1;
+ pci_write_config_word(sp->pdev, 0x4c, val);
}
}
/* Waiting till all Interrupt handlers are complete */
@@ -6263,6 +6233,26 @@ static void s2io_card_down(nic_t * sp, int flag)
break;
cnt++;
} while(cnt < 5);
+}
+
+static void s2io_card_down(nic_t * sp)
+{
+ int cnt = 0;
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ unsigned long flags;
+ register u64 val64 = 0;
+
+ del_timer_sync(&sp->alarm_timer);
+ /* If s2io_set_link task is executing, wait till it completes. */
+ while (test_and_set_bit(0, &(sp->link_state))) {
+ msleep(50);
+ }
+ atomic_set(&sp->card_state, CARD_DOWN);
+
+ /* disable Tx and Rx traffic on the NIC */
+ stop_nic(sp);
+
+ s2io_rem_isr(sp);
/* Kill tasklet. */
tasklet_kill(&sp->task);
@@ -6314,23 +6304,16 @@ static int s2io_card_up(nic_t * sp)
mac_info_t *mac_control;
struct config_param *config;
struct net_device *dev = (struct net_device *) sp->dev;
+ u16 interruptible;
/* Initialize the H/W I/O registers */
if (init_nic(sp) != 0) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
+ s2io_reset(sp);
return -ENODEV;
}
- if (sp->intr_type == MSI)
- ret = s2io_enable_msi(sp);
- else if (sp->intr_type == MSI_X)
- ret = s2io_enable_msi_x(sp);
- if (ret) {
- DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
- sp->intr_type = INTA;
- }
-
/*
* Initializing the Rx buffers. For now we are considering only 1
* Rx ring and initializing buffers into 30 Rx blocks
@@ -6361,21 +6344,39 @@ static int s2io_card_up(nic_t * sp)
sp->lro_max_aggr_per_sess = lro_max_pkts;
}
- /* Enable tasklet for the device */
- tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
-
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- tasklet_kill(&sp->task);
s2io_reset(sp);
- free_irq(dev->irq, dev);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ /* Add interrupt service routine */
+ if (s2io_add_isr(sp) != 0) {
+ if (sp->intr_type == MSI_X)
+ s2io_rem_isr(sp);
+ s2io_reset(sp);
free_rx_buffers(sp);
return -ENODEV;
}
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+ /* Enable tasklet for the device */
+ tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
+
+ /* Enable select interrupts */
+ if (sp->intr_type != INTA)
+ en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+ else {
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ }
+
+
atomic_set(&sp->card_state, CARD_UP);
return 0;
}
@@ -6395,7 +6396,7 @@ static void s2io_restart_nic(unsigned long data)
struct net_device *dev = (struct net_device *) data;
nic_t *sp = dev->priv;
- s2io_card_down(sp, 0);
+ s2io_card_down(sp);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
dev->name);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index c43f52179708..217097bc22f1 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -829,8 +829,7 @@ struct s2io_nic {
#define MSIX_FLG 0xA5
struct msix_entry *entries;
struct s2io_msix_entry *s2io_entries;
- char desc1[35];
- char desc2[35];
+ char desc[MAX_REQUESTED_MSI_X][25];
int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
@@ -1002,7 +1001,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic, int flag);
+static void s2io_card_down(nic_t *nic);
static int s2io_card_up(nic_t *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
static void restore_xmsi_data(nic_t *nic);
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h
index 2b19f8ad0318..7f8e6d0084c7 100644
--- a/drivers/net/sk98lin/h/xmac_ii.h
+++ b/drivers/net/sk98lin/h/xmac_ii.h
@@ -1473,7 +1473,7 @@ extern "C" {
#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */
#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */
#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */
-#define GM_TXCR_COL_THR_MSK (1<<10) /* Bit 12..10: Collision Threshold */
+#define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */
#define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..7de9a07b2ac2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
- return 53215; /* or: 53.125 MHz */
- else
- return 78215; /* or: 78.125 MHz */
+ return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}
/* Chip HZ to microseconds */
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index ed19ff47ce11..593387b3c0dd 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -1734,11 +1734,11 @@ enum {
GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
- GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
};
#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
-#define TX_COL_DEF 0x04
+#define TX_COL_DEF 0x04 /* late collision after 64 byte */
/* GM_RX_CTRL 16 bit r/w Receive Control Register */
enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 418f169a6a31..de91609ca112 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.4"
+#define DRV_VERSION "1.5"
#define PFX DRV_NAME " "
/*
@@ -65,6 +65,7 @@
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
#define RX_DEF_PENDING RX_MAX_PENDING
#define RX_SKB_ALIGN 8
+#define RX_BUF_WRITE 16
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -234,7 +235,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
}
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
@@ -243,6 +243,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
}
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ udelay(100);
break;
@@ -255,6 +256,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
else
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ udelay(100);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -1159,7 +1161,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
++count;
if (skb->ip_summed == CHECKSUM_HW)
@@ -1389,7 +1391,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
sky2->tx_cons = put;
- if (tx_avail(sky2) > MAX_SKB_TX_LE)
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
netif_wake_queue(dev);
}
@@ -1888,9 +1890,6 @@ resubmit:
re->skb->ip_summed = CHECKSUM_NONE;
sky2_rx_add(sky2, re->mapaddr);
- /* Tell receiver about new buffers. */
- sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
-
return skb;
oversize:
@@ -1937,7 +1936,9 @@ static inline int sky2_more_work(const struct sky2_hw *hw)
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
+ struct sky2_port *sky2;
int work_done = 0;
+ unsigned buf_write[2] = { 0, 0 };
u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
rmb();
@@ -1945,7 +1946,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
while (hw->st_idx != hwidx) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
- struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
@@ -1978,6 +1978,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
#endif
netif_receive_skb(skb);
+ /* Update receiver after 16 frames */
+ if (++buf_write[le->link] == RX_BUF_WRITE) {
+ sky2_put_idx(hw, rxqaddr[le->link],
+ sky2->rx_put);
+ buf_write[le->link] = 0;
+ }
+
+ /* Stop after net poll weight */
if (++work_done >= to_do)
goto exit_loop;
break;
@@ -2016,6 +2024,16 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
}
exit_loop:
+ if (buf_write[0]) {
+ sky2 = netdev_priv(hw->dev[0]);
+ sky2_put_idx(hw, Q_R1, sky2->rx_put);
+ }
+
+ if (buf_write[1]) {
+ sky2 = netdev_priv(hw->dev[1]);
+ sky2_put_idx(hw, Q_R2, sky2->rx_put);
+ }
+
return work_done;
}
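The RX_BUF_WRITE logic above batches the "tell the chip about refilled buffers" write: instead of bumping the put index for every received frame, it does so every 16 frames per port and flushes any remainder once the status loop exits. A stripped-down sketch of that bookkeeping, with the hardware access replaced by a stub:

#define SKETCH_RX_BUF_WRITE 16
#define SKETCH_PORTS 2

/* Stand-in for sky2_put_idx(): in the driver this is an MMIO write of
 * the ring's put index, which is comparatively expensive. */
static void sketch_put_idx(int port) { /* write put index for 'port' */ }

static void sketch_status_loop(const int *frame_port, int nframes)
{
	unsigned int buf_write[SKETCH_PORTS] = { 0, 0 };
	int i, port;

	for (i = 0; i < nframes; i++) {
		port = frame_port[i];
		/* ... receive and resubmit one frame on 'port' ... */
		if (++buf_write[port] == SKETCH_RX_BUF_WRITE) {
			sketch_put_idx(port);
			buf_write[port] = 0;
		}
	}

	/* flush whatever is left over, exactly like the exit_loop path */
	for (port = 0; port < SKETCH_PORTS; port++)
		if (buf_write[port])
			sketch_put_idx(port);
}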
@@ -2186,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- if (!~status)
- goto out;
-
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2225,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (sky2_more_work(hw))
return 1;
-out:
+
netif_rx_complete(dev0);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2286,7 +2301,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
}
-static int __devinit sky2_reset(struct sky2_hw *hw)
+static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
u8 t8, pmd_type;
@@ -3437,17 +3452,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return -EINVAL;
del_timer_sync(&hw->idle_timer);
+ netif_poll_disable(hw->dev[0]);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev) {
- if (!netif_running(dev))
- continue;
-
+ if (netif_running(dev)) {
sky2_down(dev);
netif_device_detach(dev);
- netif_poll_disable(dev);
}
}
@@ -3474,9 +3486,8 @@ static int sky2_resume(struct pci_dev *pdev)
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
- if (dev && netif_running(dev)) {
+ if (netif_running(dev)) {
netif_device_attach(dev);
- netif_poll_enable(dev);
err = sky2_up(dev);
if (err) {
@@ -3488,6 +3499,7 @@ static int sky2_resume(struct pci_dev *pdev)
}
}
+ netif_poll_enable(hw->dev[0]);
sky2_idle_start(hw);
out:
return err;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8a0bc5525f0a..2db8d19b22d1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1480,7 +1480,7 @@ enum {
GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
- GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
};
#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index b4028049ed76..4ec4b4d23ae5 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -354,6 +354,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define SMC_IRQ_FLAGS (0)
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
#else
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index b30290d53f79..ec1a8e2d458e 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
*
* returns the content of the specified SMMIO register.
*/
-static u32
+static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
u32 value;
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
* @reg: register to write to
* @value: value to write into the specified SMMIO register
*/
-static void
+static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
value = cpu_to_le32(value);
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
-static enum spider_net_descr_status
+static inline int
spider_net_get_descr_status(struct spider_net_descr *descr)
{
- u32 cmd_status;
-
- cmd_status = descr->dmac_cmd_status;
- cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
- /* no need to mask out any bits, as cmd_status is 32 bits wide only
- * (and unsigned) */
- return cmd_status;
-}
-
-/**
- * spider_net_set_descr_status -- sets the status of a descriptor
- * @descr: descriptor to change
- * @status: status to set in the descriptor
- *
- * changes the status to the specified value. Doesn't change other bits
- * in the status
- */
-static void
-spider_net_set_descr_status(struct spider_net_descr *descr,
- enum spider_net_descr_status status)
-{
- u32 cmd_status;
- /* read the status */
- cmd_status = descr->dmac_cmd_status;
- /* clean the upper 4 bits */
- cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
- /* add the status to it */
- cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
- /* and write it back */
- descr->dmac_cmd_status = cmd_status;
+ return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
/**
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
static int
spider_net_init_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain,
- struct spider_net_descr *start_descr, int no)
+ struct spider_net_descr *start_descr,
+ int direction, int no)
{
int i;
struct spider_net_descr *descr;
dma_addr_t buf;
- atomic_set(&card->rx_chain_refill,0);
-
descr = start_descr;
memset(descr, 0, sizeof(*descr) * no);
/* set up the hardware pointers in each descriptor */
for (i=0; i<no; i++, descr++) {
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
buf = pci_map_single(card->pdev, descr,
SPIDER_NET_DESCR_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ direction);
if (buf == DMA_ERROR_CODE)
goto iommu_error;
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
start_descr->prev = descr-1;
descr = start_descr;
- for (i=0; i < no; i++, descr++) {
- descr->next_descr_addr = descr->next->bus_addr;
- }
+ if (direction == PCI_DMA_FROMDEVICE)
+ for (i=0; i < no; i++, descr++)
+ descr->next_descr_addr = descr->next->bus_addr;
+ spin_lock_init(&chain->lock);
chain->head = start_descr;
chain->tail = start_descr;
@@ -375,7 +346,7 @@ iommu_error:
if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr,
SPIDER_NET_DESCR_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ direction);
return -ENOMEM;
}
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_FRAME,
- PCI_DMA_BIDIRECTIONAL);
+ PCI_DMA_FROMDEVICE);
}
descr = descr->next;
}
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
descr->buf_addr = buf;
if (buf == DMA_ERROR_CODE) {
dev_kfree_skb_any(descr->skb);
if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Could not iommu-map rx buffer\n");
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
} else {
- descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
+ SPIDER_NET_DMAC_NOINTR_COMPLETE;
}
return error;
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
* chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac.
*/
-static void
+static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
/* assume chain is aligned correctly */
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
* spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
* in the GDADMACCNTR register
*/
-static void
+static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
wmb();
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
- struct spider_net_descr_chain *chain;
-
- chain = &card->rx_chain;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ unsigned long flags;
/* one context doing the refill (and a second context seeing that
* and omitting it) is ok. If called by NAPI, we'll be called again
* as spider_net_decode_one_descr is called several times. If some
* interrupt calls us, the NAPI is about to clean up anyway. */
- if (atomic_inc_return(&card->rx_chain_refill) == 1)
- while (spider_net_get_descr_status(chain->head) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
+ if (!spin_trylock_irqsave(&chain->lock, flags))
+ return;
+
+ while (spider_net_get_descr_status(chain->head) ==
+ SPIDER_NET_DESCR_NOT_IN_USE) {
+ if (spider_net_prepare_rx_descr(card, chain->head))
+ break;
+ chain->head = chain->head->next;
+ }
- atomic_dec(&card->rx_chain_refill);
+ spin_unlock_irqrestore(&chain->lock, flags);
}
/**
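The refill path above applies the same "skip if somebody else is already at it" idea as the e1000 hunk earlier, but with spin_trylock_irqsave(), since the refill can be reached both from NAPI and from interrupt context; losing the race is harmless because the other context, or the next NAPI pass, finishes the refill. A sketch under those assumptions (types and names illustrative):

#include <linux/spinlock.h>

struct sketch_chain {
	spinlock_t lock;
	/* descriptor head/tail would live here */
};

static void sketch_refill(struct sketch_chain *chain)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&chain->lock, flags))
		return;		/* another context is already refilling */

	/* ... allocate buffers and advance chain->head here ... */

	spin_unlock_irqrestore(&chain->lock, flags);
}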
@@ -554,111 +527,6 @@ error:
}
/**
- * spider_net_release_tx_descr - processes a used tx descriptor
- * @card: card structure
- * @descr: descriptor to release
- *
- * releases a used tx descriptor (unmapping, freeing of skb)
- */
-static void
-spider_net_release_tx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct sk_buff *skb;
-
- /* unmap the skb */
- skb = descr->skb;
- pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
- PCI_DMA_BIDIRECTIONAL);
-
- dev_kfree_skb_any(skb);
-
- /* set status to not used */
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not loose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct spider_net_descr_chain *tx_chain = &card->tx_chain;
- enum spider_net_descr_status status;
-
- if (atomic_inc_return(&card->tx_chain_release) != 1) {
- atomic_dec(&card->tx_chain_release);
- return 1;
- }
-
- for (;;) {
- status = spider_net_get_descr_status(tx_chain->tail);
- switch (status) {
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal)
- goto out;
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED) */
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- pr_err("%s: forcing end of tx descriptor "
- "with status x%02x\n",
- card->netdev->name, status);
- card->netdev_stats.tx_dropped++;
- break;
-
- case SPIDER_NET_DESCR_COMPLETE:
- card->netdev_stats.tx_packets++;
- card->netdev_stats.tx_bytes +=
- tx_chain->tail->skb->len;
- break;
-
- default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
- goto out;
- }
- spider_net_release_tx_descr(card, tx_chain->tail);
- tx_chain->tail = tx_chain->tail->next;
- }
-out:
- atomic_dec(&card->tx_chain_release);
-
- netif_wake_queue(card->netdev);
-
- if (status == SPIDER_NET_DESCR_CARDOWNED)
- return 1;
- return 0;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
- *
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
- */
-static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
-{
- if ( (spider_net_release_tx_chain(card, 0)) &&
- (card->netdev->flags & IFF_UP) ) {
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
- }
-}
-
-/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
* @addr: multicast address
*
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
}
/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- tasklet_kill(&card->rxram_full_tl);
- netif_poll_disable(netdev);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
-
- /* disable/mask all interrupts */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
-
- /* free_irq(netdev->irq, netdev);*/
- free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_get_next_tx_descr - returns the next available tx descriptor
- * @card: device structure to get descriptor from
- *
- * returns the address of the next descriptor, or NULL if not available.
- */
-static struct spider_net_descr *
-spider_net_get_next_tx_descr(struct spider_net_card *card)
-{
- /* check, if head points to not-in-use descr */
- if ( spider_net_get_descr_status(card->tx_chain.head) ==
- SPIDER_NET_DESCR_NOT_IN_USE ) {
- return card->tx_chain.head;
- } else {
- return NULL;
- }
-}
-
-/**
- * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
- * @descr: descriptor structure to fill out
- * @skb: packet to consider
- *
- * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings.
- */
-static void
-spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
- struct sk_buff *skb)
-{
- /* make sure the other fields in the descriptor are written */
- wmb();
-
- if (skb->ip_summed != CHECKSUM_HW) {
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
- return;
- }
-
- /* is packet ip?
- * if yes: tcp? udp? */
- if (skb->protocol == htons(ETH_P_IP)) {
- if (skb->nh.iph->protocol == IPPROTO_TCP)
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
- else if (skb->nh.iph->protocol == IPPROTO_UDP)
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
- else /* the stack should checksum non-tcp and non-udp
- packets on his own: NETIF_F_IP_CSUM */
- descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
- }
-}
-
-/**
* spider_net_prepare_tx_descr - fill tx descriptor with skb data
* @card: card structure
* @descr: descriptor structure to fill out
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
*/
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr,
struct sk_buff *skb)
{
+ struct spider_net_descr *descr = card->tx_chain.head;
dma_addr_t buf;
- buf = pci_map_single(card->pdev, skb->data,
- skb->len, PCI_DMA_BIDIRECTIONAL);
+ buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (buf == DMA_ERROR_CODE) {
if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). "
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
descr->buf_addr = buf;
descr->buf_size = skb->len;
+ descr->next_descr_addr = 0;
descr->skb = skb;
descr->data_status = 0;
- spider_net_set_txdescr_cmdstat(descr,skb);
+ descr->dmac_cmd_status =
+ SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+ if (skb->protocol == htons(ETH_P_IP))
+ switch (skb->nh.iph->protocol) {
+ case IPPROTO_TCP:
+ descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
+ break;
+ case IPPROTO_UDP:
+ descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
+ break;
+ }
+
+ descr->prev->next_descr_addr = descr->bus_addr;
+
+ return 0;
+}
+
+/**
+ * spider_net_release_tx_descr - processes a used tx descriptor
+ * @card: card structure
+ * @descr: descriptor to release
+ *
+ * releases a used tx descriptor (unmapping, freeing of skb)
+ */
+static inline void
+spider_net_release_tx_descr(struct spider_net_card *card)
+{
+ struct spider_net_descr *descr = card->tx_chain.tail;
+ struct sk_buff *skb;
+
+ card->tx_chain.tail = card->tx_chain.tail->next;
+ descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+
+ /* unmap the skb */
+ skb = descr->skb;
+ pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * spider_net_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @brutal: if set, don't care about whether descriptor seems to be in use
+ *
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply release tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not loose initiative.
+ */
+static int
+spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
+{
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ int status;
+
+ spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
+
+ while (chain->tail != chain->head) {
+ status = spider_net_get_descr_status(chain->tail);
+ switch (status) {
+ case SPIDER_NET_DESCR_COMPLETE:
+ card->netdev_stats.tx_packets++;
+ card->netdev_stats.tx_bytes += chain->tail->skb->len;
+ break;
+
+ case SPIDER_NET_DESCR_CARDOWNED:
+ if (!brutal)
+ return 1;
+ /* fallthrough, if we release the descriptors
+ * brutally (then we don't care about
+ * SPIDER_NET_DESCR_CARDOWNED) */
+
+ case SPIDER_NET_DESCR_RESPONSE_ERROR:
+ case SPIDER_NET_DESCR_PROTECTION_ERROR:
+ case SPIDER_NET_DESCR_FORCE_END:
+ if (netif_msg_tx_err(card))
+ pr_err("%s: forcing end of tx descriptor "
+ "with status x%02x\n",
+ card->netdev->name, status);
+ card->netdev_stats.tx_errors++;
+ break;
+
+ default:
+ card->netdev_stats.tx_dropped++;
+ return 1;
+ }
+ spider_net_release_tx_descr(card);
+ }
return 0;
}
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
* spider_net_kick_tx_dma writes the current tx chain head as start address
* of the tx descriptor chain and enables the transmission DMA engine
*/
-static void
-spider_net_kick_tx_dma(struct spider_net_card *card,
- struct spider_net_descr *descr)
+static inline void
+spider_net_kick_tx_dma(struct spider_net_card *card)
{
- /* this is the only descriptor in the output chain.
- * Enable TX DMA */
+ struct spider_net_descr *descr;
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
+ if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
+ SPIDER_NET_TX_DMA_EN)
+ goto out;
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
+ descr = card->tx_chain.tail;
+ for (;;) {
+ if (spider_net_get_descr_status(descr) ==
+ SPIDER_NET_DESCR_CARDOWNED) {
+ spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
+ descr->bus_addr);
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_VALUE);
+ break;
+ }
+ if (descr == card->tx_chain.head)
+ break;
+ descr = descr->next;
+ }
+
+out:
+ mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
/**
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, <0 on failure
+ * returns 0 on success, !0 on failure
*/
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
- struct spider_net_descr *descr;
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ struct spider_net_descr *descr = chain->head;
+ unsigned long flags;
int result;
+ spin_lock_irqsave(&chain->lock, flags);
+
spider_net_release_tx_chain(card, 0);
- descr = spider_net_get_next_tx_descr(card);
+ if (chain->head->next == chain->tail->prev) {
+ card->netdev_stats.tx_dropped++;
+ result = NETDEV_TX_LOCKED;
+ goto out;
+ }
- if (!descr)
- goto error;
+ if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
+ result = NETDEV_TX_LOCKED;
+ goto out;
+ }
- result = spider_net_prepare_tx_descr(card, descr, skb);
- if (result)
- goto error;
+ if (spider_net_prepare_tx_descr(card, skb) != 0) {
+ card->netdev_stats.tx_dropped++;
+ result = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ result = NETDEV_TX_OK;
+ spider_net_kick_tx_dma(card);
card->tx_chain.head = card->tx_chain.head->next;
- if (spider_net_get_descr_status(descr->prev) !=
- SPIDER_NET_DESCR_CARDOWNED) {
- /* make sure the current descriptor is in memory. Then
- * kicking it on again makes sense, if the previous is not
- * card-owned anymore. Check the previous descriptor twice
- * to omit an mb() in heavy traffic cases */
- mb();
- if (spider_net_get_descr_status(descr->prev) !=
- SPIDER_NET_DESCR_CARDOWNED)
- spider_net_kick_tx_dma(card, descr);
- }
+out:
+ spin_unlock_irqrestore(&chain->lock, flags);
+ netif_wake_queue(netdev);
+ return result;
+}
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to clean up our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+ unsigned long flags;
- return NETDEV_TX_OK;
+ spin_lock_irqsave(&card->tx_chain.lock, flags);
-error:
- card->netdev_stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ if ((spider_net_release_tx_chain(card, 0) != 0) &&
+ (card->netdev->flags & IFF_UP))
+ spider_net_kick_tx_dma(card);
+
+ spin_unlock_irqrestore(&card->tx_chain.lock, flags);
}
/**
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
/* unmap descriptor */
pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
- PCI_DMA_BIDIRECTIONAL);
+ PCI_DMA_FROMDEVICE);
/* the cases we'll throw away the packet immediately */
if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
static int
spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{
- enum spider_net_descr_status status;
- struct spider_net_descr *descr;
- struct spider_net_descr_chain *chain;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *descr = chain->tail;
+ int status;
int result;
- chain = &card->rx_chain;
- descr = chain->tail;
-
status = spider_net_get_descr_status(descr);
if (status == SPIDER_NET_DESCR_CARDOWNED) {
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
card->netdev->name, status);
card->netdev_stats.rx_dropped++;
pci_unmap_single(card->pdev, descr->buf_addr,
- SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(descr->skb);
goto refill;
}
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
/* ok, we've got a packet in descr */
result = spider_net_pass_skb_up(descr, card, napi);
refill:
- spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+ descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
/* change the descriptor state: */
if (!napi)
spider_net_refill_rx_chain(card);
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
}
/**
- * spider_net_enable_txdmac - enables a TX DMA controller
- * @card: card structure
- *
- * spider_net_enable_txdmac enables the TX DMA controller by setting the
- * descriptor chain tail address
- */
-static void
-spider_net_enable_txdmac(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- card->tx_chain.tail->bus_addr);
-}
-
-/**
* spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
* @card: card structure
*
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
{ SPIDER_NET_GMRWOLCTRL, 0 },
{ SPIDER_NET_GTESTMD, 0x10000000 },
{ SPIDER_NET_GTTQMSK, 0x00400040 },
- { SPIDER_NET_GTESTMD, 0 },
{ SPIDER_NET_GMACINTEN, 0 },
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
- /* set chain tail adress for TX chain */
- spider_net_enable_txdmac(card);
-
spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
SPIDER_NET_LENLMT_VALUE);
spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
SPIDER_NET_INT1_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
SPIDER_NET_INT2_MASK_VALUE);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_GDTDCEIDIS);
}
/**
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)
result = -ENOMEM;
if (spider_net_init_chain(card, &card->tx_chain,
- card->descr, tx_descriptors))
+ card->descr,
+ PCI_DMA_TODEVICE, tx_descriptors))
goto alloc_tx_failed;
if (spider_net_init_chain(card, &card->rx_chain,
- card->descr + tx_descriptors, rx_descriptors))
+ card->descr + tx_descriptors,
+ PCI_DMA_FROMDEVICE, rx_descriptors))
goto alloc_rx_failed;
/* allocate rx skbs */
@@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
/* empty sequencer data */
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
+ spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
sequencer * 8, 0x0);
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
}
/**
+ * spider_net_stop - called upon ifconfig down
+ * @netdev: interface device structure
+ *
+ * always returns 0
+ */
+int
+spider_net_stop(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ tasklet_kill(&card->rxram_full_tl);
+ netif_poll_disable(netdev);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ del_timer_sync(&card->tx_timer);
+
+ /* disable/mask all interrupts */
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
+
+ /* free_irq(netdev->irq, netdev);*/
+ free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_FEND_VALUE);
+
+ /* turn off DMA, force end */
+ spider_net_disable_rxdmac(card);
+
+ /* release chains */
+ if (spin_trylock(&card->tx_chain.lock)) {
+ spider_net_release_tx_chain(card, 1);
+ spin_unlock(&card->tx_chain.lock);
+ }
+
+ spider_net_free_chain(card, &card->tx_chain);
+ spider_net_free_chain(card, &card->rx_chain);
+
+ return 0;
+}
+
+/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
* @data: data, is interface device structure
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
goto out;
spider_net_open(netdev);
- spider_net_kick_tx_dma(card, card->tx_chain.head);
+ spider_net_kick_tx_dma(card);
netif_device_attach(netdev);
out:
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
pci_set_drvdata(card->pdev, netdev);
- atomic_set(&card->tx_chain_release,0);
card->rxram_full_tl.data = (unsigned long) card;
card->rxram_full_tl.func =
(void (*)(unsigned long)) spider_net_handle_rxram_full;
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
spider_net_setup_netdev_ops(netdev);
- netdev->features = NETIF_F_HW_CSUM;
+ netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
* NETIF_F_HW_VLAN_FILTER */
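
Setting NETIF_F_LLTX above changes the locking contract: the core no longer takes the
device xmit lock around hard_start_xmit, so the driver serializes transmits with its own
tx_chain.lock, as spider_net_xmit now does. A minimal sketch of that general LLTX pattern,
with hypothetical foo_* names (the resource check and queueing helper are placeholders,
not spider_net functions):

        static int foo_xmit(struct sk_buff *skb, struct net_device *netdev)
        {
                struct foo_priv *priv = netdev_priv(netdev);
                unsigned long flags;
                int ret = NETDEV_TX_OK;

                /* NETIF_F_LLTX: the stack holds no lock here, take our own */
                spin_lock_irqsave(&priv->tx_lock, flags);
                if (foo_tx_ring_full(priv))
                        ret = NETDEV_TX_BUSY;   /* the stack requeues the skb */
                else
                        foo_queue_packet(priv, skb);
                spin_unlock_irqrestore(&priv->tx_lock, flags);

                return ret;
        }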
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..f6dcf180ae3d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_DMA_RX_VALUE 0x80000000
#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
/* to set TX_DMA_EN */
-#define SPIDER_NET_DMA_TX_VALUE 0x80000000
+#define SPIDER_NET_TX_DMA_EN 0x80000000
+#define SPIDER_NET_GDTDCEIDIS 0x00000002
+#define SPIDER_NET_DMA_TX_VALUE (SPIDER_NET_TX_DMA_EN | \
+ SPIDER_NET_GDTDCEIDIS)
#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
(~SPIDER_NET_TXINT) & \
(~SPIDER_NET_RXINT) )
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
+#define SPIDER_NET_GPREXEC 0x80000000
+#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-/* descriptor bits
- *
- * 1010 descriptor ready
- * 0 descr in middle of chain
- * 000 fixed to 0
- *
- * 0 no interrupt on completion
- * 000 fixed to 0
- * 1 no ipsec processing
- * 1 last descriptor for this frame
- * 00 no checksum
- * 10 tcp checksum
- * 11 udp checksum
- *
- * 00 fixed to 0
- * 0 fixed to 0
- * 0 no interrupt on response errors
- * 0 no interrupt on invalid descr
- * 0 no interrupt on dma process termination
- * 0 no interrupt on descr chain end
- * 0 no interrupt on descr complete
- *
- * 000 fixed to 0
- * 0 response error interrupt status
- * 0 invalid descr status
- * 0 dma termination status
- * 0 descr chain end status
- * 0 descr complete status */
-#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
-#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
-#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
-#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
-#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
-
-/* descr ready, descr is in middle of chain, get interrupt on completion */
-#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
-
-enum spider_net_descr_status {
- SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
- SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
- SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
- SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
- SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
- SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
- SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
-};
+#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
+#define SPIDER_NET_DMAC_NOCS 0x00040000
+#define SPIDER_NET_DMAC_TCP 0x00020000
+#define SPIDER_NET_DMAC_UDP 0x00030000
+#define SPIDER_NET_TXDCEST 0x08000000
+
+#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
+#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
+#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
struct spider_net_descr {
/* as defined by the hardware */
@@ -398,7 +369,7 @@ struct spider_net_descr {
} __attribute__((aligned(32)));
struct spider_net_descr_chain {
- /* we walk from tail to head */
+ spinlock_t lock;
struct spider_net_descr *head;
struct spider_net_descr *tail;
};
@@ -453,8 +424,6 @@ struct spider_net_card {
struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain;
- atomic_t rx_chain_refill;
- atomic_t tx_chain_release;
struct net_device_stats netdev_stats;
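
With the old spider_net_descr_status enum gone, the descriptor state lives in the top
nibble of dmac_cmd_status and is compared directly against the SPIDER_NET_DESCR_* values
above. The status accessor therefore reduces to a mask; the helper itself is outside this
hunk, so the following is only a sketch of what it has to look like under the new defines:

        static inline int
        spider_net_get_descr_status(struct spider_net_descr *descr)
        {
                /* the DMAC writes its indication into bits 31..28 */
                return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
        }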
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 8673fd4c08c7..c6f5bc3c042f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
}
static struct pci_device_id happymeal_pci_ids[] = {
- {
- .vendor = PCI_VENDOR_ID_SUN,
- .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {
static int __init happy_meal_pci_init(void)
{
- return pci_module_init(&hme_pci_driver);
+ return pci_register_driver(&hme_pci_driver);
}
static void happy_meal_pci_exit(void)
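
The sunhme hunks are pure modernization: the hand-written ID table entry becomes
PCI_DEVICE(), and the deprecated pci_module_init() alias becomes pci_register_driver().
Behaviour is unchanged; PCI_DEVICE() is roughly the following initializer from
<linux/pci.h>, so the subsystem IDs are still wildcarded:

        #define PCI_DEVICE(vend, dev) \
                .vendor = (vend), .device = (dev), \
                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID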
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
{
if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
(idprom->id_machtype == (SM_SUN4|SM_4_470))) {
- memset(&sun4_sdev, 0, sizeof(sdev));
+ memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
sun4_sdev.irqs[0] = 6;
return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
static int __exit sunlance_sun4_remove(void)
{
- struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev);
+ struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
struct net_device *net_dev = lp->dev;
unregister_netdevice(net_dev);
- lance_free_hwresources(root_lance_dev);
+ lance_free_hwresources(lp);
free_netdev(net_dev);
- dev_set_drvdata(&sun4_sdev->dev, NULL);
+ dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
return 0;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f645921aff8b..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.62"
-#define DRV_MODULE_RELDATE "June 30, 2006"
+#define DRV_MODULE_VERSION "3.63"
+#define DRV_MODULE_RELDATE "July 25, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+ "aborting.\n", tp->dev->name);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ netif_poll_enable(tp->dev);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp, 1);
+ if (tg3_init_hw(tp, 1))
+ goto out;
tg3_netif_start(tp);
if (restart_timer)
mod_timer(&tp->timer, jiffies + 1);
+out:
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
+ int err;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp, 0);
+ err = tg3_restart_hw(tp, 0);
- tg3_netif_start(tp);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
- return 0;
+ return err;
}
/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
* end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
*/
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
{
u32 i;
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
- -1, i) < 0)
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX standard ring, "
+ "only %d out of %d buffers were allocated "
+ "successfully.\n",
+ tp->dev->name, i, tp->rx_pending);
+ if (i == 0)
+ return -ENOMEM;
+ tp->rx_pending = i;
break;
+ }
}
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
- -1, i) < 0)
+ -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX jumbo ring, "
+ "only %d out of %d buffers were "
+ "allocated successfully.\n",
+ tp->dev->name, i, tp->rx_jumbo_pending);
+ if (i == 0) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ tp->rx_jumbo_pending = i;
break;
+ }
}
}
+ return 0;
}
/*
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
+ int err = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 0);
-
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 0);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
} else {
spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
spin_unlock_bh(&tp->lock);
}
- return 0;
+ return err;
}
/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* can only do this after the hardware has been
* successfully reset.
*/
- tg3_init_rings(tp);
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if (netif_running(dev)) {
tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, 1);
+ if (err)
+ return TG3_LOOPBACK_FAILED;
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ if (!tg3_restart_hw(tp, 1))
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
@@ -10078,6 +10131,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -11697,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ if (tg3_restart_hw(tp, 1))
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
@@ -11705,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_attach(dev);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
}
@@ -11731,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static struct pci_driver tg3_driver = {
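
The common thread in the tg3 hunks is that every halt/re-init path now goes through
tg3_restart_hw() and only restarts the queues when re-initialization succeeded,
propagating the error instead of silently continuing with dead hardware. Condensed to its
skeleton, the pattern used by the ethtool and resume paths above is:

        tg3_full_lock(tp, irq_sync);
        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);    /* closes the device itself on failure */
                if (!err)
                        tg3_netif_start(tp);
        }
        tg3_full_unlock(tp);
        return err;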
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 063816f2b11e..4103c37172f9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* If problems develop with TSO, check this first.
*/
numDesc = skb_shinfo(skb)->nr_frags + 1;
- if(skb_tso_size(skb))
+ if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
- if(skb_tso_size(skb)) {
+ if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;
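
skb_tso_size() no longer exists in this tree; skb_is_gso() is the replacement predicate
introduced with the GSO rework. It is nothing more than a test of the shared-info
gso_size field, roughly:

        static inline int skb_is_gso(const struct sk_buff *skb)
        {
                return skb_shinfo(skb)->gso_size;
        }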
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f5b0078eb4ad..aa9cd92f46b2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
if (PHYSR0 & PHYSR0_SPDG)
status |= VELOCITY_SPEED_1000;
- if (PHYSR0 & PHYSR0_SPD10)
+ else if (PHYSR0 & PHYSR0_SPD10)
status |= VELOCITY_SPEED_10;
else
status |= VELOCITY_SPEED_100;
@@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
u32 status;
status = check_connection_type(vptr->mac_regs);
- cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
- if (status & VELOCITY_SPEED_100)
+ cmd->supported = SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (status & VELOCITY_SPEED_1000)
+ cmd->speed = SPEED_1000;
+ else if (status & VELOCITY_SPEED_100)
cmd->speed = SPEED_100;
else
cmd->speed = SPEED_10;
@@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem * regs = vptr->mac_regs;
- return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1;
+ return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c92ac9fde083..435e91ec4620 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -116,27 +116,33 @@ static inline void openwin(card_t *card, u8 page)
#include "hd6457x.c"
+static inline void set_carrier(port_t *port)
+{
+ if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
+ netif_carrier_on(port_to_dev(port));
+ else
+ netif_carrier_off(port_to_dev(port));
+}
+
+
static void sca_msci_intr(port_t *port)
{
- struct net_device *dev = port_to_dev(port);
- card_t* card = port_to_card(port);
- u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */
+ u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */
/* Reset MSCI TX underrun status bit */
- sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card);
+ sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
- struct net_device_stats *stats = hdlc_stats(dev);
+ struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
stats->tx_errors++; /* TX Underrun error detected */
stats->tx_fifo_errors++;
}
/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
- sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card);
+ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
if (stat & ST1_CDCD)
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD),
- dev);
+ set_carrier(port);
}
@@ -190,8 +196,7 @@ static int c101_open(struct net_device *dev)
sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev);
- printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
+ set_carrier(port);
/* enable MSCI1 CDCD interrupt */
sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@@ -378,7 +383,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
}
sca_init_sync_port(card); /* Set up C101 memory */
- hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev);
+ set_carrier(card);
printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
" using %u TX + %u RX packets rings\n",
@@ -443,4 +448,5 @@ module_exit(c101_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Moxa C101 serial port driver");
MODULE_LICENSE("GPL v2");
-module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "irq,ram:irq,...");
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c
index d3743321a977..dce2bb317b82 100644
--- a/drivers/net/wan/hd6457x.c
+++ b/drivers/net/wan/hd6457x.c
@@ -168,6 +168,23 @@ static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
}
+static inline void sca_set_carrier(port_t *port)
+{
+ if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+ port_to_dev(port)->name);
+#endif
+ netif_carrier_on(port_to_dev(port));
+ } else {
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+ port_to_dev(port)->name);
+#endif
+ netif_carrier_off(port_to_dev(port));
+ }
+}
+
static void sca_init_sync_port(port_t *port)
{
@@ -237,9 +254,7 @@ static void sca_init_sync_port(port_t *port)
sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
}
}
-
- hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
- port_to_dev(port));
+ sca_set_carrier(port);
}
@@ -262,8 +277,7 @@ static inline void sca_msci_intr(port_t *port)
}
if (stat & ST1_CDCD)
- hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
- port_to_dev(port));
+ sca_set_carrier(port);
}
#endif
@@ -566,7 +580,7 @@ static void sca_open(struct net_device *dev)
- all DMA interrupts
*/
- hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev);
+ sca_set_carrier(port);
#ifdef __HD64570_H
/* MSCI TX INT and RX INT A IRQ enable */
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 1fd04662c4fc..f289daba0c7b 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -192,9 +192,7 @@ static int cisco_rx(struct sk_buff *skb)
"uptime %ud%uh%um%us)\n",
dev->name, days, hrs,
min, sec);
-#if 0
- netif_carrier_on(dev);
-#endif
+ netif_dormant_off(dev);
hdlc->state.cisco.up = 1;
}
}
@@ -227,9 +225,7 @@ static void cisco_timer(unsigned long arg)
hdlc->state.cisco.settings.timeout * HZ)) {
hdlc->state.cisco.up = 0;
printk(KERN_INFO "%s: Link down\n", dev->name);
-#if 0
- netif_carrier_off(dev);
-#endif
+ netif_dormant_on(dev);
}
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
@@ -265,10 +261,7 @@ static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&hdlc->state.cisco.timer);
-#if 0
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
-#endif
+ netif_dormant_on(dev);
hdlc->state.cisco.up = 0;
hdlc->state.cisco.request_sent = 0;
}
@@ -328,6 +321,7 @@ int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->type = ARPHRD_CISCO;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
+ netif_dormant_on(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 523afe17564e..7bb737bbdeb9 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -301,7 +301,7 @@ static int pvc_open(struct net_device *dev)
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->master);
if (hdlc->state.fr.settings.lmi == LMI_NONE)
- pvc->state.active = hdlc->carrier;
+ pvc->state.active = netif_carrier_ok(pvc->master);
pvc_carrier(pvc->state.active, pvc);
hdlc->state.fr.dce_changed = 1;
@@ -545,11 +545,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
hdlc->state.fr.reliable = reliable;
if (reliable) {
-#if 0
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-#endif
-
+ netif_dormant_off(dev);
hdlc->state.fr.n391cnt = 0; /* Request full status */
hdlc->state.fr.dce_changed = 1;
@@ -562,11 +558,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
}
} else {
-#if 0
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
-#endif
-
+ netif_dormant_on(dev);
while (pvc) { /* Deactivate all PVCs */
pvc_carrier(0, pvc);
pvc->state.exist = pvc->state.active = 0;
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index b7da55140fbd..04ca1f7b6424 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -34,10 +34,11 @@
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
#include <linux/hdlc.h>
-static const char* version = "HDLC support module revision 1.18";
+static const char* version = "HDLC support module revision 1.19";
#undef DEBUG_LINK
@@ -73,57 +74,51 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
-static void __hdlc_set_carrier_on(struct net_device *dev)
+static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.start)
return hdlc->proto.start(dev);
-#if 0
-#ifdef DEBUG_LINK
- if (netif_carrier_ok(dev))
- printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
-#endif
- netif_carrier_on(dev);
-#endif
}
-static void __hdlc_set_carrier_off(struct net_device *dev)
+static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.stop)
return hdlc->proto.stop(dev);
-
-#if 0
-#ifdef DEBUG_LINK
- if (!netif_carrier_ok(dev))
- printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
-#endif
- netif_carrier_off(dev);
-#endif
}
-void hdlc_set_carrier(int on, struct net_device *dev)
+static int hdlc_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
- hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct net_device *dev = ptr;
+ hdlc_device *hdlc;
unsigned long flags;
- on = on ? 1 : 0;
+ int on;
+
+ if (dev->get_stats != hdlc_get_stats)
+ return NOTIFY_DONE; /* not an HDLC device */
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE; /* Only interested in carrier changes */
+
+ on = netif_carrier_ok(dev);
#ifdef DEBUG_LINK
- printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
+ printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n",
+ dev->name, on);
#endif
+ hdlc = dev_to_hdlc(dev);
spin_lock_irqsave(&hdlc->state_lock, flags);
if (hdlc->carrier == on)
goto carrier_exit; /* no change in DCD line level */
-#ifdef DEBUG_LINK
- printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
-#endif
hdlc->carrier = on;
if (!hdlc->open)
@@ -131,14 +126,15 @@ void hdlc_set_carrier(int on, struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
- __hdlc_set_carrier_on(dev);
+ hdlc_proto_start(dev);
} else {
printk(KERN_INFO "%s: Carrier lost\n", dev->name);
- __hdlc_set_carrier_off(dev);
+ hdlc_proto_stop(dev);
}
carrier_exit:
spin_unlock_irqrestore(&hdlc->state_lock, flags);
+ return NOTIFY_DONE;
}
@@ -165,7 +161,7 @@ int hdlc_open(struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
- __hdlc_set_carrier_on(dev);
+ hdlc_proto_start(dev);
} else
printk(KERN_INFO "%s: No carrier\n", dev->name);
@@ -190,7 +186,7 @@ void hdlc_close(struct net_device *dev)
hdlc->open = 0;
if (hdlc->carrier)
- __hdlc_set_carrier_off(dev);
+ hdlc_proto_stop(dev);
spin_unlock_irq(&hdlc->state_lock);
@@ -303,7 +299,6 @@ MODULE_LICENSE("GPL v2");
EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
-EXPORT_SYMBOL(hdlc_set_carrier);
EXPORT_SYMBOL(hdlc_ioctl);
EXPORT_SYMBOL(hdlc_setup);
EXPORT_SYMBOL(alloc_hdlcdev);
@@ -315,9 +310,18 @@ static struct packet_type hdlc_packet_type = {
};
+static struct notifier_block hdlc_notifier = {
+ .notifier_call = hdlc_device_event,
+};
+
+
static int __init hdlc_module_init(void)
{
+ int result;
+
printk(KERN_INFO "%s\n", version);
+ if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+ return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
@@ -327,6 +331,7 @@ static int __init hdlc_module_init(void)
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
+ unregister_netdevice_notifier(&hdlc_notifier);
}
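
Two ideas interact in the hdlc changes: carrier state is now observed through a netdevice
notifier (NETDEV_CHANGE) instead of the exported hdlc_set_carrier(), and the protocol
layers report "carrier up but protocol not negotiated" via the RFC 2863 dormant flag
rather than faking carrier transitions. How the dormant flag combines with carrier into
the operational state, sketched with a hypothetical lmi_is_up condition:

        /* linkwatch derives operstate from carrier + dormant:
         *   carrier off                 -> IF_OPER_DOWN
         *   carrier on, dormant set     -> IF_OPER_DORMANT
         *   carrier on, dormant cleared -> IF_OPER_UP
         */
        if (lmi_is_up)
                netif_dormant_off(dev); /* protocol negotiated, interface usable */
        else
                netif_dormant_on(dev);  /* physical link may be up, protocol is not */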
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81263eaede0..fbaab5bf71eb 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->hard_header = NULL;
dev->type = ARPHRD_PPP;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 9456d31cb1c1..f15aa6ba77f1 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->type = ARPHRD_RAWHDLC;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index b1285cc8fee6..d1884987f94e 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->tx_queue_len = old_qlen;
memcpy(dev->dev_addr, "\x00\x01", 2);
get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 07e5eef1fe0f..a867fb411f89 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->hard_header = NULL;
dev->type = ARPHRD_X25;
dev->addr_len = 0;
+ netif_dormant_off(dev);
return 0;
}
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index e013b817cab8..dcf46add3adf 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -564,4 +564,5 @@ module_exit(n2_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("RISCom/N2 serial port driver");
MODULE_LICENSE("GPL v2");
-module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index d564224cdca9..b2031dfc4bb1 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -149,7 +149,10 @@ static inline void wanxl_cable_intr(port_t *port)
printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
port->dev->name, pm, dte, cable, dsr, dcd);
- hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev);
+ if (value & STATUS_CABLE_DCD)
+ netif_carrier_on(port->dev);
+ else
+ netif_carrier_off(port->dev);
}
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
select CRYPTO
+ select CRYPTO_AES
---help---
This is the standard Linux driver to support Cisco/Aironet PCMCIA
802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index e1c5a939bca4..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -1547,7 +1547,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm)
goto generate_new;
/* Get the noise samples. */
- assert(bcm->noisecalc.nr_samples <= 8);
+ assert(bcm->noisecalc.nr_samples < 8);
i = bcm->noisecalc.nr_samples;
noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
}
if (sec->flags & SEC_AUTH_MODE) {
secinfo->auth_mode = sec->auth_mode;
- dprintk(", .auth_mode = %d\n", sec->auth_mode);
+ dprintk(", .auth_mode = %d", sec->auth_mode);
}
dprintk("\n");
if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- if (erq->pointer) {
+ if (erq->length > 0) {
if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
index = priv->tx_key;
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
if (erq->flags & IW_ENCODE_RESTRICTED)
restricted = 1;
- if (erq->pointer) {
+ if (erq->pointer && erq->length > 0) {
priv->keys[index].len = cpu_to_le16(xlen);
memset(priv->keys[index].data, 0,
sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 15465278c789..7f78b7801fb3 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -34,8 +34,6 @@
#include "orinoco.h"
-static unsigned char *primsym;
-static unsigned char *secsym;
static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
@@ -440,7 +438,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
*/
static int
spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
- const unsigned char *image)
+ const unsigned char *image, int secondary)
{
int ret;
const unsigned char *ptr;
@@ -455,7 +453,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
first_block = (const struct dblock *) ptr;
/* Read the PDA */
- if (image != primsym) {
+ if (secondary) {
ret = spectrum_read_pda(hw, pda, sizeof(pda));
if (ret)
return ret;
@@ -472,7 +470,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
return ret;
/* Write the PDA to the adapter */
- if (image != primsym) {
+ if (secondary) {
ret = spectrum_apply_pda(hw, first_block, pda);
if (ret)
return ret;
@@ -487,7 +485,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
ret = hermes_init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
- if (image != primsym && ret != 0)
+ if (secondary && ret != 0)
return -ENODEV;
/* And this should work with any firmware */
@@ -509,33 +507,30 @@ spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link)
const struct firmware *fw_entry;
if (request_firmware(&fw_entry, primary_fw_name,
- &handle_to_dev(link)) == 0) {
- primsym = fw_entry->data;
- } else {
+ &handle_to_dev(link)) != 0) {
printk(KERN_ERR PFX "Cannot find firmware: %s\n",
primary_fw_name);
return -ENOENT;
}
- if (request_firmware(&fw_entry, secondary_fw_name,
- &handle_to_dev(link)) == 0) {
- secsym = fw_entry->data;
- } else {
- printk(KERN_ERR PFX "Cannot find firmware: %s\n",
- secondary_fw_name);
- return -ENOENT;
- }
-
/* Load primary firmware */
- ret = spectrum_dl_image(hw, link, primsym);
+ ret = spectrum_dl_image(hw, link, fw_entry->data, 0);
+ release_firmware(fw_entry);
if (ret) {
printk(KERN_ERR PFX "Primary firmware download failed\n");
return ret;
}
- /* Load secondary firmware */
- ret = spectrum_dl_image(hw, link, secsym);
+ if (request_firmware(&fw_entry, secondary_fw_name,
+ &handle_to_dev(link)) != 0) {
+ printk(KERN_ERR PFX "Cannot find firmware: %s\n",
+ secondary_fw_name);
+ return -ENOENT;
+ }
+ /* Load secondary firmware */
+ ret = spectrum_dl_image(hw, link, fw_entry->data, 1);
+ release_firmware(fw_entry);
if (ret) {
printk(KERN_ERR PFX "Secondary firmware download failed\n");
}
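
spectrum_cs no longer keeps both firmware images pinned in static pointers; each blob is
requested, downloaded and released before the next one is fetched. The underlying
request_firmware() contract is request/consume/release; a minimal sketch with a
hypothetical foo_download() consumer:

        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "symbol_sp24t_prim_fw", &handle_to_dev(link));
        if (err)
                return err;                     /* image not available */

        err = foo_download(hw, fw->data, fw->size);     /* hypothetical consumer */
        release_firmware(fw);                   /* drop the image once downloaded */
        return err;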
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
zd->dev->name);
usb_set_intfdata(interface, zd);
+ zd1201_enable(zd); /* zd1201 likes to start up enabled, */
+ zd1201_disable(zd); /* interfering with all the wifis in range */
return 0;
err_net:
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ce1cb2c6aa8d..72f90525bf68 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -375,10 +375,8 @@ static void int_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
- goto kfree;
case -EPIPE:
- usb_clear_halt(urb->dev, EP_INT_IN);
- /* FALL-THROUGH */
+ goto kfree;
default:
goto resubmit;
}
@@ -580,10 +578,8 @@ static void rx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
- return;
case -EPIPE:
- usb_clear_halt(urb->dev, EP_DATA_IN);
- /* FALL-THROUGH */
+ return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
@@ -749,11 +745,9 @@ static void tx_urb_complete(struct urb *urb, struct pt_regs *pt_regs)
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
+ case -EPIPE:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
break;
- case -EPIPE:
- usb_clear_halt(urb->dev, EP_DATA_OUT);
- /* FALL-THROUGH */
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;