author    Fugang Duan <B38611@freescale.com>    2012-04-13 16:57:10 +0800
committer Jason Liu <r64343@freescale.com>      2012-07-20 13:36:30 +0800
commit    f3641201d5ee3abc7c1b284ed74c1c3c35e5253d
tree      20f9349f220666aa11ccdafcb02613560df66f22
parent    7ac5566074e426d33c1018a544c96efa570b57ce
ENGR00179636-01 - FEC: ENET RX FIFO overruns issue

- Add NAPI support. NAPI improves high-speed networking performance by
  reducing the CPU load from per-packet interrupt handling and the number
  of dropped packets.
- The number of ENET RX FIFO overruns is reduced with NAPI enabled.

Signed-off-by: Fugang Duan <B38611@freescale.com>
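For reference, the general NAPI flow this patch adopts is sketched below. This is a minimal, simplified illustration only; names such as my_priv, my_poll, my_clean_rx and my_rx_irq_enable are placeholders, not identifiers from this driver.

/*
 * Minimal sketch of the NAPI pattern (illustrative names only).
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done;

	/* Process at most 'budget' received packets in softirq context. */
	done = my_clean_rx(priv, budget);

	if (done < budget) {
		/* Ring drained: leave polling mode and unmask the RX interrupt. */
		napi_complete(napi);
		my_rx_irq_enable(priv, true);
	}
	return done;
}

/* In the RX interrupt handler: mask the RX interrupt and defer to NAPI. */
if (napi_schedule_prep(&priv->napi)) {
	my_rx_irq_enable(priv, false);
	__napi_schedule(&priv->napi);
}

/* At init time, register the poll routine and its weight (budget cap). */
netif_napi_add(ndev, &priv->napi, my_poll, 64);

In the patch, fec_rx_poll(), fec_enet_interrupt() and fec_enet_init() fill these three roles respectively, and fec_rx_int_is_enabled() masks/unmasks the RX interrupt.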
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig |   7
-rwxr-xr-x  drivers/net/fec.c   | 204
2 files changed, 206 insertions(+), 5 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a6ccb2243b57..8e799247cd48 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1951,6 +1951,13 @@ config FEC
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
+config FEC_NAPI
+ bool "Use NAPI for FEC driver"
+ depends on FEC
+ help
+ This option enables NAPI support in the on-chip FEC
+ (Fast Ethernet Controller) driver.
+
config FEC_1588
bool "Enable FEC 1588 timestamping"
depends on FEC
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2078ef8d25f0..a93ac31e690f 100755
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -230,10 +230,22 @@ struct fec_enet_private {
struct fec_ptp_private *ptp_priv;
uint ptimer_present;
+
+ struct napi_struct napi;
+ int napi_weight;
+ bool use_napi;
};
+#define FEC_NAPI_WEIGHT 64
+#ifdef CONFIG_FEC_NAPI
+#define FEC_NAPI_ENABLE true
+#else
+#define FEC_NAPI_ENABLE false
+#endif
+
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
+static int fec_rx_poll(struct napi_struct *napi, int budget);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
@@ -390,6 +402,29 @@ fec_timeout(struct net_device *ndev)
}
static void
+fec_rx_int_is_enabled(struct net_device *ndev, bool enabled)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+
+ int_events = readl(fep->hwp + FEC_IMASK);
+ if (enabled)
+ int_events |= FEC_ENET_RXF;
+ else
+ int_events &= ~FEC_ENET_RXF;
+ writel(int_events, fep->hwp + FEC_IMASK);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fec_enet_netpoll(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ fec_enet_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static void
fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep;
@@ -478,6 +513,144 @@ fec_enet_tx(struct net_device *ndev)
spin_unlock(&fep->hw_lock);
}
+/* NAPI polling: receive packets */
+static int fec_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct fec_enet_private *fep =
+ container_of(napi, struct fec_enet_private, napi);
+ struct net_device *ndev = napi->dev;
+ struct fec_ptp_private *fpp = fep->ptp_priv;
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int pkt_received = 0;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+
+ if (fep->use_napi)
+ WARN_ON(!budget);
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((status & BD_ENET_RX_LAST) == 0)
+ dev_err(&ndev->dev, "FEC ENET: rcv is not +last\n");
+
+ if (!fep->opened)
+ goto rx_processing_done;
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ ndev->stats.rx_errors++;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ }
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
+ ndev->stats.rx_frame_errors++;
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ ndev->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (status & BD_ENET_RX_CL) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+ data = (__u8 *)__va(bdp->cbd_bufaddr);
+
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&ndev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
+
+ /* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+
+ if (unlikely(!skb)) {
+ dev_err(&ndev->dev,
+ "%s: Memory squeeze, dropping packet.\n", ndev->name);
+ ndev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4); /* Make room */
+ skb_copy_to_linear_data(skb, data, pkt_len - 4);
+ /* 1588 message timestamp handling */
+ if (fep->ptimer_present)
+ fec_ptp_store_rxstamp(fpp, skb, bdp);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&ndev->dev, data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
+#ifdef CONFIG_ENHANCED_BD
+ bdp->cbd_esc = BD_ENET_RX_INT;
+ bdp->cbd_prot = 0;
+ bdp->cbd_bdu = 0;
+#endif
+
+ /* Update BD pointer to next entry */
+ if (status & BD_ENET_RX_WRAP)
+ bdp = fep->rx_bd_base;
+ else
+ bdp++;
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ }
+ fep->cur_rx = bdp;
+
+ if (pkt_received < budget) {
+ napi_complete(napi);
+ fec_rx_int_is_enabled(ndev, true);
+ }
+
+ return pkt_received;
+}
/* During a receive, the cur_rx points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
@@ -501,8 +674,6 @@ fec_enet_rx(struct net_device *ndev)
flush_cache_all();
#endif
- spin_lock(&fep->hw_lock);
-
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
@@ -607,8 +778,6 @@ rx_processing_done:
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
}
fep->cur_rx = bdp;
-
- spin_unlock(&fep->hw_lock);
}
static irqreturn_t
@@ -618,6 +787,7 @@ fec_enet_interrupt(int irq, void *dev_id)
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_ptp_private *fpp = fep->ptp_priv;
uint int_events;
+ ulong flags;
irqreturn_t ret = IRQ_NONE;
do {
@@ -626,7 +796,18 @@ fec_enet_interrupt(int irq, void *dev_id)
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
- fec_enet_rx(ndev);
+ spin_lock_irqsave(&fep->hw_lock, flags);
+
+ if (fep->use_napi) {
+ /* Disable the RX interrupt */
+ if (napi_schedule_prep(&fep->napi)) {
+ fec_rx_int_is_enabled(ndev, false);
+ __napi_schedule(&fep->napi);
+ }
+ } else
+ fec_enet_rx(ndev);
+
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
}
/* Transmit OK, or non-fatal error. Update the buffer
@@ -1106,6 +1287,9 @@ fec_enet_open(struct net_device *ndev)
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
+ if (fep->use_napi)
+ napi_enable(&fep->napi);
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1266,6 +1450,9 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_do_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_enet_netpoll,
+#endif
};
/*
@@ -1303,6 +1490,13 @@ static int fec_enet_init(struct net_device *ndev)
ndev->netdev_ops = &fec_netdev_ops;
ndev->ethtool_ops = &fec_enet_ethtool_ops;
+ fep->use_napi = FEC_NAPI_ENABLE;
+ fep->napi_weight = FEC_NAPI_WEIGHT;
+ if (fep->use_napi) {
+ fec_rx_int_is_enabled(ndev, false);
+ netif_napi_add(ndev, &fep->napi, fec_rx_poll, fep->napi_weight);
+ }
+
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {