Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 1826 |
1 file changed, 1254 insertions, 572 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 5bf31f1509c9..16def131c390 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -8,9 +8,10 @@ * * Author: Andy Fleming * Maintainer: Kumar Gala + * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * - * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. - * Copyright (c) 2007 MontaVista Software, Inc. + * Copyright 2002-2009 Freescale Semiconductor, Inc. + * Copyright 2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); struct sk_buff *gfar_new_skb(struct net_device *dev); -static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, +static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, struct sk_buff *skb); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); @@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif -int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); -static int gfar_clean_tx_ring(struct net_device *dev); +int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); +static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull); static void gfar_vlan_rx_register(struct net_device *netdev, @@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb); MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Gianfar Ethernet Driver"); MODULE_LICENSE("GPL"); +static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, + dma_addr_t buf) +{ + u32 lstatus; + + bdp->bufPtr = buf; + + lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); + if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) + lstatus |= BD_LFLAG(RXBD_WRAP); + + eieio(); + + bdp->lstatus = lstatus; +} + +static int gfar_init_bds(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *txbdp; + struct rxbd8 *rxbdp; + int i, j; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + /* Initialize some variables in our dev structure */ + tx_queue->num_txbdfree = tx_queue->tx_ring_size; + tx_queue->dirty_tx = tx_queue->tx_bd_base; + tx_queue->cur_tx = tx_queue->tx_bd_base; + tx_queue->skb_curtx = 0; + tx_queue->skb_dirtytx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = tx_queue->tx_bd_base; + for (j = 0; j < tx_queue->tx_ring_size; j++) { + txbdp->lstatus = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status |= TXBD_WRAP; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->cur_rx = rx_queue->rx_bd_base; + 
rx_queue->skb_currx = 0; + rxbdp = rx_queue->rx_bd_base; + + for (j = 0; j < rx_queue->rx_ring_size; j++) { + struct sk_buff *skb = rx_queue->rx_skbuff[j]; + + if (skb) { + gfar_init_rxbdp(rx_queue, rxbdp, + rxbdp->bufPtr); + } else { + skb = gfar_new_skb(ndev); + if (!skb) { + pr_err("%s: Can't allocate RX buffers\n", + ndev->name); + goto err_rxalloc_fail; + } + rx_queue->rx_skbuff[j] = skb; + + gfar_new_rxbdp(rx_queue, rxbdp, skb); + } + + rxbdp++; + } + + } + + return 0; + +err_rxalloc_fail: + free_skb_resources(priv); + return -ENOMEM; +} + +static int gfar_alloc_skb_resources(struct net_device *ndev) +{ + void *vaddr; + dma_addr_t addr; + int i, j, k; + struct gfar_private *priv = netdev_priv(ndev); + struct device *dev = &priv->ofdev->dev; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + priv->total_tx_ring_size = 0; + for (i = 0; i < priv->num_tx_queues; i++) + priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; + + priv->total_rx_ring_size = 0; + for (i = 0; i < priv->num_rx_queues; i++) + priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; + + /* Allocate memory for the buffer descriptors */ + vaddr = dma_alloc_coherent(dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + &addr, GFP_KERNEL); + if (!vaddr) { + if (netif_msg_ifup(priv)) + pr_err("%s: Could not allocate buffer descriptors!\n", + ndev->name); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_bd_base = (struct txbd8 *) vaddr; + tx_queue->tx_bd_dma_base = addr; + tx_queue->dev = ndev; + /* enet DMA only understands physical addresses */ + addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; + } + + /* Start the rx descriptor ring where the tx ring leaves off */ + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; + rx_queue->rx_bd_dma_base = addr; + rx_queue->dev = ndev; + addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; + } + + /* Setup the skbuff rings */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * + tx_queue->tx_ring_size, GFP_KERNEL); + if (!tx_queue->tx_skbuff) { + if (netif_msg_ifup(priv)) + pr_err("%s: Could not allocate tx_skbuff\n", + ndev->name); + goto cleanup; + } + + for (k = 0; k < tx_queue->tx_ring_size; k++) + tx_queue->tx_skbuff[k] = NULL; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * + rx_queue->rx_ring_size, GFP_KERNEL); + + if (!rx_queue->rx_skbuff) { + if (netif_msg_ifup(priv)) + pr_err("%s: Could not allocate rx_skbuff\n", + ndev->name); + goto cleanup; + } + + for (j = 0; j < rx_queue->rx_ring_size; j++) + rx_queue->rx_skbuff[j] = NULL; + } + + if (gfar_init_bds(ndev)) + goto cleanup; + + return 0; + +cleanup: + free_skb_resources(priv); + return -ENOMEM; +} + +static void gfar_init_tx_rx_base(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->tbase0; + for(i = 0; i < priv->num_tx_queues; i++) { + gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); + baddr += 2; + } + + baddr = ®s->rbase0; + for(i = 0; i < priv->num_rx_queues; i++) { + 
gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); + baddr += 2; + } +} + +static void gfar_init_mac(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 rctrl = 0; + u32 tctrl = 0; + u32 attrs = 0; + + /* write the tx/rx base registers */ + gfar_init_tx_rx_base(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing(priv, 0xFF, 0xFF); + + if (priv->rx_filer_enable) + rctrl |= RCTRL_FILREN; + + if (priv->rx_csum_enable) + rctrl |= RCTRL_CHECKSUMMING; + + if (priv->extended_hash) { + rctrl |= RCTRL_EXTHASH; + + gfar_clear_exact_match(ndev); + rctrl |= RCTRL_EMEN; + } + + if (priv->padding) { + rctrl &= ~RCTRL_PAL_MASK; + rctrl |= RCTRL_PADDING(priv->padding); + } + + /* keep vlan related bits if it's enabled */ + if (priv->vlgrp) { + rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + tctrl |= TCTRL_VLINS; + } + + /* Init rctrl based on our settings */ + gfar_write(®s->rctrl, rctrl); + + if (ndev->features & NETIF_F_IP_CSUM) + tctrl |= TCTRL_INIT_CSUM; + + tctrl |= TCTRL_TXSCHED_PRIO; + + gfar_write(®s->tctrl, tctrl); + + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); + + gfar_write(®s->attreli, attrs); + + /* Start with defaults, and add stashing or locking + * depending on the approprate variables */ + attrs = ATTR_INIT_SETTINGS; + + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; + + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; + + gfar_write(®s->attr, attrs); + + gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); + gfar_write(®s->fifo_tx_starve, priv->fifo_starve); + gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); +} + static const struct net_device_ops gfar_netdev_ops = { .ndo_open = gfar_enet_open, .ndo_start_xmit = gfar_start_xmit, @@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_set_multicast_list = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, + .ndo_select_queue = gfar_select_queue, .ndo_vlan_rx_register = gfar_vlan_rx_register, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = { #endif }; +unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; +unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; + +void lock_rx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_rx_queues; i++) + spin_lock(&priv->rx_queue[i]->rxlock); +} + +void lock_tx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_lock(&priv->tx_queue[i]->txlock); +} + +void unlock_rx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_rx_queues; i++) + spin_unlock(&priv->rx_queue[i]->rxlock); +} + +void unlock_tx_qs(struct gfar_private *priv) +{ + int i = 0x0; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_unlock(&priv->tx_queue[i]->txlock); +} + /* Returns 1 if incoming frames use an FCB */ static inline int gfar_uses_fcb(struct gfar_private *priv) { return priv->vlgrp || priv->rx_csum_enable; } -static int gfar_of_init(struct net_device *dev) +u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + return skb_get_queue_mapping(skb); +} +static void free_tx_pointers(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_tx_queues; i++) + kfree(priv->tx_queue[i]); +} + +static void free_rx_pointers(struct gfar_private *priv) +{ + int i = 0; + + 
for (i = 0; i < priv->num_rx_queues; i++) + kfree(priv->rx_queue[i]); +} + +static void unmap_group_regs(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < MAXGROUPS; i++) + if (priv->gfargrp[i].regs) + iounmap(priv->gfargrp[i].regs); +} + +static void disable_napi(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_grps; i++) + napi_disable(&priv->gfargrp[i].napi); +} + +static void enable_napi(struct gfar_private *priv) +{ + int i = 0; + + for (i = 0; i < priv->num_grps; i++) + napi_enable(&priv->gfargrp[i].napi); +} + +static int gfar_parse_group(struct device_node *np, + struct gfar_private *priv, const char *model) +{ + u32 *queue_mask; + u64 addr, size; + + addr = of_translate_address(np, + of_get_address(np, 0, &size, NULL)); + priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); + + if (!priv->gfargrp[priv->num_grps].regs) + return -ENOMEM; + + priv->gfargrp[priv->num_grps].interruptTransmit = + irq_of_parse_and_map(np, 0); + + /* If we aren't the FEC we have multiple interrupts */ + if (model && strcasecmp(model, "FEC")) { + priv->gfargrp[priv->num_grps].interruptReceive = + irq_of_parse_and_map(np, 1); + priv->gfargrp[priv->num_grps].interruptError = + irq_of_parse_and_map(np,2); + if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || + priv->gfargrp[priv->num_grps].interruptReceive < 0 || + priv->gfargrp[priv->num_grps].interruptError < 0) { + return -EINVAL; + } + } + + priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; + priv->gfargrp[priv->num_grps].priv = priv; + spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); + if(priv->mode == MQ_MG_MODE) { + queue_mask = (u32 *)of_get_property(np, + "fsl,rx-bit-map", NULL); + priv->gfargrp[priv->num_grps].rx_bit_map = + queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); + queue_mask = (u32 *)of_get_property(np, + "fsl,tx-bit-map", NULL); + priv->gfargrp[priv->num_grps].tx_bit_map = + queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); + } else { + priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; + priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; + } + priv->num_grps++; + + return 0; +} + +static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) { const char *model; const char *ctype; const void *mac_addr; - u64 addr, size; - int err = 0; - struct gfar_private *priv = netdev_priv(dev); - struct device_node *np = priv->node; + int err = 0, i; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + struct device_node *np = ofdev->node; + struct device_node *child = NULL; const u32 *stash; const u32 *stash_len; const u32 *stash_idx; + unsigned int num_tx_qs, num_rx_qs; + u32 *tx_queues, *rx_queues; if (!np || !of_device_is_available(np)) return -ENODEV; - /* get a pointer to the register memory */ - addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); - priv->regs = ioremap(addr, size); + /* parse the num of tx and rx queues */ + tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); + num_tx_qs = tx_queues ? *tx_queues : 1; + + if (num_tx_qs > MAX_TX_QS) { + printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", + num_tx_qs, MAX_TX_QS); + printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); + num_rx_qs = rx_queues ? 
*rx_queues : 1; - if (priv->regs == NULL) + if (num_rx_qs > MAX_RX_QS) { + printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", + num_tx_qs, MAX_TX_QS); + printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); + dev = *pdev; + if (NULL == dev) return -ENOMEM; - priv->interruptTransmit = irq_of_parse_and_map(np, 0); + priv = netdev_priv(dev); + priv->node = ofdev->node; + priv->ndev = dev; + + dev->num_tx_queues = num_tx_qs; + dev->real_num_tx_queues = num_tx_qs; + priv->num_tx_queues = num_tx_qs; + priv->num_rx_queues = num_rx_qs; + priv->num_grps = 0x0; model = of_get_property(np, "model", NULL); - /* If we aren't the FEC we have multiple interrupts */ - if (model && strcasecmp(model, "FEC")) { - priv->interruptReceive = irq_of_parse_and_map(np, 1); + for (i = 0; i < MAXGROUPS; i++) + priv->gfargrp[i].regs = NULL; + + /* Parse and initialize group specific information */ + if (of_device_is_compatible(np, "fsl,etsec2")) { + priv->mode = MQ_MG_MODE; + for_each_child_of_node(np, child) { + err = gfar_parse_group(child, priv, model); + if (err) + goto err_grp_init; + } + } else { + priv->mode = SQ_SG_MODE; + err = gfar_parse_group(np, priv, model); + if(err) + goto err_grp_init; + } - priv->interruptError = irq_of_parse_and_map(np, 2); + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i] = NULL; + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i] = NULL; + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( + sizeof (struct gfar_priv_tx_q), GFP_KERNEL); + if (!priv->tx_queue[i]) { + err = -ENOMEM; + goto tx_alloc_failed; + } + priv->tx_queue[i]->tx_skbuff = NULL; + priv->tx_queue[i]->qindex = i; + priv->tx_queue[i]->dev = dev; + spin_lock_init(&(priv->tx_queue[i]->txlock)); + } - if (priv->interruptTransmit < 0 || - priv->interruptReceive < 0 || - priv->interruptError < 0) { - err = -EINVAL; - goto err_out; + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( + sizeof (struct gfar_priv_rx_q), GFP_KERNEL); + if (!priv->rx_queue[i]) { + err = -ENOMEM; + goto rx_alloc_failed; } + priv->rx_queue[i]->rx_skbuff = NULL; + priv->rx_queue[i]->qindex = i; + priv->rx_queue[i]->dev = dev; + spin_lock_init(&(priv->rx_queue[i]->rxlock)); } + stash = of_get_property(np, "bd-stash", NULL); - if(stash) { + if (stash) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } @@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev) return 0; -err_out: - iounmap(priv->regs); +rx_alloc_failed: + free_rx_pointers(priv); +tx_alloc_failed: + free_tx_pointers(priv); +err_grp_init: + unmap_group_regs(priv); + free_netdev(dev); return err; } @@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); } +static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) +{ + unsigned int new_bit_map = 0x0; + int mask = 0x1 << (max_qs - 1), i; + for (i = 0; i < max_qs; i++) { + if (bit_map & mask) + new_bit_map = new_bit_map + (1 << i); + mask = mask >> 0x1; + } + return new_bit_map; +} + +static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, + u32 class) +{ + u32 rqfpr = FPR_FILER_MASK; + u32 rqfcr = 0x0; + + rqfar--; + rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; + ftp_rqfpr[rqfar] = rqfpr; + ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, 
rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_NOMATCH; + ftp_rqfpr[rqfar] = rqfpr; + ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; + rqfpr = class; + ftp_rqfcr[rqfar] = rqfcr; + ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; + rqfpr = class; + ftp_rqfcr[rqfar] = rqfcr; + ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + return rqfar; +} + +static void gfar_init_filer_table(struct gfar_private *priv) +{ + int i = 0x0; + u32 rqfar = MAX_FILER_IDX; + u32 rqfcr = 0x0; + u32 rqfpr = FPR_FILER_MASK; + + /* Default rule */ + rqfcr = RQFCR_CMP_MATCH; + ftp_rqfcr[rqfar] = rqfcr; + ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); + + /* cur_filer_idx indicated the fisrt non-masked rule */ + priv->cur_filer_idx = rqfar; + + /* Rest are masked rules */ + rqfcr = RQFCR_CMP_NOMATCH; + for (i = 0; i < rqfar; i++) { + ftp_rqfcr[i] = rqfcr; + ftp_rqfpr[i] = rqfpr; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } +} + /* Set up the ethernet device structure, private data, * and anything else we need before we start */ static int gfar_probe(struct of_device *ofdev, @@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev, u32 tempval; struct net_device *dev = NULL; struct gfar_private *priv = NULL; - int err = 0; + struct gfar __iomem *regs = NULL; + int err = 0, i, grp_idx = 0; int len_devname; + u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; + u32 isrg = 0; + u32 __iomem *baddr; - /* Create an ethernet device instance */ - dev = alloc_etherdev(sizeof (*priv)); + err = gfar_of_init(ofdev, &dev); - if (NULL == dev) - return -ENOMEM; + if (err) + return err; priv = netdev_priv(dev); priv->ndev = dev; @@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev, priv->node = ofdev->node; SET_NETDEV_DEV(dev, &ofdev->dev); - err = gfar_of_init(dev); - - if (err) - goto regs_fail; - - spin_lock_init(&priv->txlock); - spin_lock_init(&priv->rxlock); spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); dev_set_drvdata(&ofdev->dev, priv); + regs = priv->gfargrp[0].regs; /* Stop the DMA engine now, in case it was running before */ /* (The firmware could have used it, and left it running). */ gfar_halt(dev); /* Reset MAC layer */ - gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); + gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); /* We need to delay at least 3 TX clocks */ udelay(2); tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - gfar_write(&priv->regs->maccfg1, tempval); + gfar_write(®s->maccfg1, tempval); /* Initialize MACCFG2. 
*/ - gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); + gfar_write(®s->maccfg2, MACCFG2_INIT_SETTINGS); /* Initialize ECNTRL */ - gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); + gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); /* Set the dev->base_addr to the gfar reg region */ - dev->base_addr = (unsigned long) (priv->regs); + dev->base_addr = (unsigned long) regs; SET_NETDEV_DEV(dev, &ofdev->dev); /* Fill in the dev structure */ dev->watchdog_timeo = TX_TIMEOUT; - netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); dev->mtu = 1500; - dev->netdev_ops = &gfar_netdev_ops; dev->ethtool_ops = &gfar_ethtool_ops; + /* Register for napi ...We are registering NAPI for each grp */ + for (i = 0; i < priv->num_grps; i++) + netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { priv->rx_csum_enable = 1; dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; @@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev, priv->extended_hash = 1; priv->hash_width = 9; - priv->hash_regs[0] = &priv->regs->igaddr0; - priv->hash_regs[1] = &priv->regs->igaddr1; - priv->hash_regs[2] = &priv->regs->igaddr2; - priv->hash_regs[3] = &priv->regs->igaddr3; - priv->hash_regs[4] = &priv->regs->igaddr4; - priv->hash_regs[5] = &priv->regs->igaddr5; - priv->hash_regs[6] = &priv->regs->igaddr6; - priv->hash_regs[7] = &priv->regs->igaddr7; - priv->hash_regs[8] = &priv->regs->gaddr0; - priv->hash_regs[9] = &priv->regs->gaddr1; - priv->hash_regs[10] = &priv->regs->gaddr2; - priv->hash_regs[11] = &priv->regs->gaddr3; - priv->hash_regs[12] = &priv->regs->gaddr4; - priv->hash_regs[13] = &priv->regs->gaddr5; - priv->hash_regs[14] = &priv->regs->gaddr6; - priv->hash_regs[15] = &priv->regs->gaddr7; + priv->hash_regs[0] = ®s->igaddr0; + priv->hash_regs[1] = ®s->igaddr1; + priv->hash_regs[2] = ®s->igaddr2; + priv->hash_regs[3] = ®s->igaddr3; + priv->hash_regs[4] = ®s->igaddr4; + priv->hash_regs[5] = ®s->igaddr5; + priv->hash_regs[6] = ®s->igaddr6; + priv->hash_regs[7] = ®s->igaddr7; + priv->hash_regs[8] = ®s->gaddr0; + priv->hash_regs[9] = ®s->gaddr1; + priv->hash_regs[10] = ®s->gaddr2; + priv->hash_regs[11] = ®s->gaddr3; + priv->hash_regs[12] = ®s->gaddr4; + priv->hash_regs[13] = ®s->gaddr5; + priv->hash_regs[14] = ®s->gaddr6; + priv->hash_regs[15] = ®s->gaddr7; } else { priv->extended_hash = 0; priv->hash_width = 8; - priv->hash_regs[0] = &priv->regs->gaddr0; - priv->hash_regs[1] = &priv->regs->gaddr1; - priv->hash_regs[2] = &priv->regs->gaddr2; - priv->hash_regs[3] = &priv->regs->gaddr3; - priv->hash_regs[4] = &priv->regs->gaddr4; - priv->hash_regs[5] = &priv->regs->gaddr5; - priv->hash_regs[6] = &priv->regs->gaddr6; - priv->hash_regs[7] = &priv->regs->gaddr7; + priv->hash_regs[0] = ®s->gaddr0; + priv->hash_regs[1] = ®s->gaddr1; + priv->hash_regs[2] = ®s->gaddr2; + priv->hash_regs[3] = ®s->gaddr3; + priv->hash_regs[4] = ®s->gaddr4; + priv->hash_regs[5] = ®s->gaddr5; + priv->hash_regs[6] = ®s->gaddr6; + priv->hash_regs[7] = ®s->gaddr7; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) @@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev, if (dev->features & NETIF_F_IP_CSUM) dev->hard_header_len += GMAC_FCB_LEN; + /* Program the isrg regs only if number of grps > 1 */ + if (priv->num_grps > 1) { + baddr = ®s->isrg0; + for (i = 0; i < priv->num_grps; i++) { + isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); + isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); + gfar_write(baddr, isrg); + 
baddr++; + isrg = 0x0; + } + } + + /* Need to reverse the bit maps as bit_map's MSB is q0 + * but, for_each_bit parses from right to left, which + * basically reverses the queue numbers */ + for (i = 0; i< priv->num_grps; i++) { + priv->gfargrp[i].tx_bit_map = reverse_bitmap( + priv->gfargrp[i].tx_bit_map, MAX_TX_QS); + priv->gfargrp[i].rx_bit_map = reverse_bitmap( + priv->gfargrp[i].rx_bit_map, MAX_RX_QS); + } + + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, + * also assign queues to groups */ + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { + priv->gfargrp[grp_idx].num_rx_queues = 0x0; + for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, + priv->num_rx_queues) { + priv->gfargrp[grp_idx].num_rx_queues++; + priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; + rstat = rstat | (RSTAT_CLEAR_RHALT >> i); + rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); + } + priv->gfargrp[grp_idx].num_tx_queues = 0x0; + for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, + priv->num_tx_queues) { + priv->gfargrp[grp_idx].num_tx_queues++; + priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; + tstat = tstat | (TSTAT_CLEAR_THALT >> i); + tqueue = tqueue | (TQUEUE_EN0 >> i); + } + priv->gfargrp[grp_idx].rstat = rstat; + priv->gfargrp[grp_idx].tstat = tstat; + rstat = tstat =0; + } + + gfar_write(®s->rqueue, rqueue); + gfar_write(®s->tqueue, tqueue); + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - priv->tx_ring_size = DEFAULT_TX_RING_SIZE; - priv->rx_ring_size = DEFAULT_RX_RING_SIZE; - priv->num_txbdfree = DEFAULT_TX_RING_SIZE; - priv->txcoalescing = DEFAULT_TX_COALESCE; - priv->txic = DEFAULT_TXIC; - priv->rxcoalescing = DEFAULT_RX_COALESCE; - priv->rxic = DEFAULT_RXIC; + /* Initializing some of the rx/tx queue level parameters */ + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; + priv->tx_queue[i]->txic = DEFAULT_TXIC; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; + priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; + priv->rx_queue[i]->rxic = DEFAULT_RXIC; + } /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; @@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev, /* fill out IRQ number and name fields */ len_devname = strlen(dev->name); - strncpy(&priv->int_name_tx[0], dev->name, len_devname); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - strncpy(&priv->int_name_tx[len_devname], - "_tx", sizeof("_tx") + 1); - - strncpy(&priv->int_name_rx[0], dev->name, len_devname); - strncpy(&priv->int_name_rx[len_devname], - "_rx", sizeof("_rx") + 1); + for (i = 0; i < priv->num_grps; i++) { + strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, + len_devname); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + strncpy(&priv->gfargrp[i].int_name_tx[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_tx[ + strlen(priv->gfargrp[i].int_name_tx)] = i+48; + strncpy(&priv->gfargrp[i].int_name_tx[strlen( + priv->gfargrp[i].int_name_tx)], + "_tx", sizeof("_tx") + 1); + + strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, + len_devname); + strncpy(&priv->gfargrp[i].int_name_rx[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_rx[ + strlen(priv->gfargrp[i].int_name_rx)] = i+48; + strncpy(&priv->gfargrp[i].int_name_rx[strlen( + priv->gfargrp[i].int_name_rx)], + "_rx", 
sizeof("_rx") + 1); + + strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, + len_devname); + strncpy(&priv->gfargrp[i].int_name_er[len_devname], + "_g", sizeof("_g")); + priv->gfargrp[i].int_name_er[strlen( + priv->gfargrp[i].int_name_er)] = i+48; + strncpy(&priv->gfargrp[i].int_name_er[strlen(\ + priv->gfargrp[i].int_name_er)], + "_er", sizeof("_er") + 1); + } else + priv->gfargrp[i].int_name_tx[len_devname] = '\0'; + } - strncpy(&priv->int_name_er[0], dev->name, len_devname); - strncpy(&priv->int_name_er[len_devname], - "_er", sizeof("_er") + 1); - } else - priv->int_name_tx[len_devname] = '\0'; + /* Initialize the filer table */ + gfar_init_filer_table(priv); /* Create all the sysfs files */ gfar_init_sysfs(dev); @@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev, /* Even more device info helps when determining which kernel */ /* provided which set of benchmarks. */ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); - printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", - dev->name, priv->rx_ring_size, priv->tx_ring_size); + for (i = 0; i < priv->num_rx_queues; i++) + printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", + dev->name, i, priv->rx_queue[i]->rx_ring_size); + for(i = 0; i < priv->num_tx_queues; i++) + printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", + dev->name, i, priv->tx_queue[i]->tx_ring_size); return 0; register_fail: - iounmap(priv->regs); -regs_fail: + unmap_group_regs(priv); + free_tx_pointers(priv); + free_rx_pointers(priv); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) @@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev) dev_set_drvdata(&ofdev->dev, NULL); unregister_netdev(priv->ndev); - iounmap(priv->regs); + unmap_group_regs(priv); free_netdev(priv->ndev); return 0; } #ifdef CONFIG_PM -static int gfar_suspend(struct of_device *ofdev, pm_message_t state) + +static int gfar_suspend(struct device *dev) { - struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); - struct net_device *dev = priv->ndev; + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); - netif_device_detach(dev); + netif_device_detach(ndev); - if (netif_running(dev)) { - spin_lock_irqsave(&priv->txlock, flags); - spin_lock(&priv->rxlock); + if (netif_running(ndev)) { - gfar_halt_nodisable(dev); + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + + gfar_halt_nodisable(ndev); /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ - tempval = gfar_read(&priv->regs->maccfg1); + tempval = gfar_read(®s->maccfg1); tempval &= ~MACCFG1_TX_EN; if (!magic_packet) tempval &= ~MACCFG1_RX_EN; - gfar_write(&priv->regs->maccfg1, tempval); + gfar_write(®s->maccfg1, tempval); - spin_unlock(&priv->rxlock); - spin_unlock_irqrestore(&priv->txlock, flags); + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); - napi_disable(&priv->napi); + disable_napi(priv); if (magic_packet) { /* Enable interrupt on Magic Packet */ - gfar_write(&priv->regs->imask, IMASK_MAG); + gfar_write(®s->imask, IMASK_MAG); /* Enable Magic Packet mode */ - tempval = gfar_read(&priv->regs->maccfg2); + tempval = gfar_read(®s->maccfg2); tempval |= MACCFG2_MPEN; - gfar_write(&priv->regs->maccfg2, tempval); + gfar_write(®s->maccfg2, tempval); } else { phy_stop(priv->phydev); } @@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state) return 0; } -static int gfar_resume(struct of_device *ofdev) +static int gfar_resume(struct device *dev) { - struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); - struct net_device *dev = priv->ndev; + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); - if (!netif_running(dev)) { - netif_device_attach(dev); + if (!netif_running(ndev)) { + netif_device_attach(ndev); return 0; } @@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev) /* Disable Magic Packet mode, in case something * else woke us up. */ + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); - spin_lock_irqsave(&priv->txlock, flags); - spin_lock(&priv->rxlock); - - tempval = gfar_read(&priv->regs->maccfg2); + tempval = gfar_read(®s->maccfg2); tempval &= ~MACCFG2_MPEN; - gfar_write(&priv->regs->maccfg2, tempval); + gfar_write(®s->maccfg2, tempval); - gfar_start(dev); + gfar_start(ndev); - spin_unlock(&priv->rxlock); - spin_unlock_irqrestore(&priv->txlock, flags); + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); - netif_device_attach(dev); + netif_device_attach(ndev); - napi_enable(&priv->napi); + enable_napi(priv); return 0; } + +static int gfar_restore(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) + return 0; + + gfar_init_bds(ndev); + init_registers(ndev); + gfar_set_mac_address(ndev); + gfar_init_mac(ndev); + gfar_start(ndev); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + if (priv->phydev) + phy_start(priv->phydev); + + netif_device_attach(ndev); + enable_napi(priv); + + return 0; +} + +static struct dev_pm_ops gfar_pm_ops = { + .suspend = gfar_suspend, + .resume = gfar_resume, + .freeze = gfar_suspend, + .thaw = gfar_resume, + .restore = gfar_restore, +}; + +#define GFAR_PM_OPS (&gfar_pm_ops) + +static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state) +{ + return gfar_suspend(&ofdev->dev); +} + +static int gfar_legacy_resume(struct of_device *ofdev) +{ + return gfar_resume(&ofdev->dev); +} + #else -#define gfar_suspend NULL -#define gfar_resume NULL + +#define GFAR_PM_OPS NULL +#define gfar_legacy_suspend NULL +#define gfar_legacy_resume NULL + #endif /* Reads the controller's registers to determine what interface @@ -597,7 +1285,10 @@ static int gfar_resume(struct of_device *ofdev) static phy_interface_t 
gfar_get_interface(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - u32 ecntrl = gfar_read(&priv->regs->ecntrl); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 ecntrl; + + ecntrl = gfar_read(®s->ecntrl); if (ecntrl & ECNTRL_SGMII_MODE) return PHY_INTERFACE_MODE_SGMII; @@ -719,46 +1410,52 @@ static void gfar_configure_serdes(struct net_device *dev) static void init_registers(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = NULL; + int i = 0; - /* Clear IEVENT */ - gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear IEVENT */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); - /* Initialize IMASK */ - gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); + /* Initialize IMASK */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + } + regs = priv->gfargrp[0].regs; /* Init hash registers to zero */ - gfar_write(&priv->regs->igaddr0, 0); - gfar_write(&priv->regs->igaddr1, 0); - gfar_write(&priv->regs->igaddr2, 0); - gfar_write(&priv->regs->igaddr3, 0); - gfar_write(&priv->regs->igaddr4, 0); - gfar_write(&priv->regs->igaddr5, 0); - gfar_write(&priv->regs->igaddr6, 0); - gfar_write(&priv->regs->igaddr7, 0); - - gfar_write(&priv->regs->gaddr0, 0); - gfar_write(&priv->regs->gaddr1, 0); - gfar_write(&priv->regs->gaddr2, 0); - gfar_write(&priv->regs->gaddr3, 0); - gfar_write(&priv->regs->gaddr4, 0); - gfar_write(&priv->regs->gaddr5, 0); - gfar_write(&priv->regs->gaddr6, 0); - gfar_write(&priv->regs->gaddr7, 0); + gfar_write(®s->igaddr0, 0); + gfar_write(®s->igaddr1, 0); + gfar_write(®s->igaddr2, 0); + gfar_write(®s->igaddr3, 0); + gfar_write(®s->igaddr4, 0); + gfar_write(®s->igaddr5, 0); + gfar_write(®s->igaddr6, 0); + gfar_write(®s->igaddr7, 0); + + gfar_write(®s->gaddr0, 0); + gfar_write(®s->gaddr1, 0); + gfar_write(®s->gaddr2, 0); + gfar_write(®s->gaddr3, 0); + gfar_write(®s->gaddr4, 0); + gfar_write(®s->gaddr5, 0); + gfar_write(®s->gaddr6, 0); + gfar_write(®s->gaddr7, 0); /* Zero out the rmon mib registers if it has them */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { - memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); + memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); /* Mask off the CAM interrupts */ - gfar_write(&priv->regs->rmon.cam1, 0xffffffff); - gfar_write(&priv->regs->rmon.cam2, 0xffffffff); + gfar_write(®s->rmon.cam1, 0xffffffff); + gfar_write(®s->rmon.cam2, 0xffffffff); } /* Initialize the max receive buffer length */ - gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); + gfar_write(®s->mrblr, priv->rx_buffer_size); /* Initialize the Minimum Frame Length Register */ - gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); + gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); } @@ -766,23 +1463,28 @@ static void init_registers(struct net_device *dev) static void gfar_halt_nodisable(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->regs; + struct gfar __iomem *regs = NULL; u32 tempval; + int i = 0; - /* Mask all interrupts */ - gfar_write(®s->imask, IMASK_INIT_CLEAR); + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Mask all interrupts */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); - /* Clear all interrupts */ - gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + /* Clear all interrupts */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + } + regs = priv->gfargrp[0].regs; /* Stop the DMA, and wait for it to stop */ - tempval = 
gfar_read(&priv->regs->dmactrl); + tempval = gfar_read(®s->dmactrl); if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != (DMACTRL_GRS | DMACTRL_GTS)) { tempval |= (DMACTRL_GRS | DMACTRL_GTS); - gfar_write(&priv->regs->dmactrl, tempval); + gfar_write(®s->dmactrl, tempval); - while (!(gfar_read(&priv->regs->ievent) & + while (!(gfar_read(®s->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) cpu_relax(); } @@ -792,7 +1494,7 @@ static void gfar_halt_nodisable(struct net_device *dev) void gfar_halt(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->regs; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; gfar_halt_nodisable(dev); @@ -803,101 +1505,131 @@ void gfar_halt(struct net_device *dev) gfar_write(®s->maccfg1, tempval); } +static void free_grp_irqs(struct gfar_priv_grp *grp) +{ + free_irq(grp->interruptError, grp); + free_irq(grp->interruptTransmit, grp); + free_irq(grp->interruptReceive, grp); +} + void stop_gfar(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->regs; unsigned long flags; + int i; phy_stop(priv->phydev); + /* Lock it down */ - spin_lock_irqsave(&priv->txlock, flags); - spin_lock(&priv->rxlock); + local_irq_save(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); gfar_halt(dev); - spin_unlock(&priv->rxlock); - spin_unlock_irqrestore(&priv->txlock, flags); + unlock_rx_qs(priv); + unlock_tx_qs(priv); + local_irq_restore(flags); /* Free the IRQs */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - free_irq(priv->interruptError, dev); - free_irq(priv->interruptTransmit, dev); - free_irq(priv->interruptReceive, dev); + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); } else { - free_irq(priv->interruptTransmit, dev); + for (i = 0; i < priv->num_grps; i++) + free_irq(priv->gfargrp[i].interruptTransmit, + &priv->gfargrp[i]); } free_skb_resources(priv); - - dma_free_coherent(&priv->ofdev->dev, - sizeof(struct txbd8)*priv->tx_ring_size - + sizeof(struct rxbd8)*priv->rx_ring_size, - priv->tx_bd_base, - gfar_read(®s->tbase0)); } -/* If there are any tx skbs or rx skbs still around, free them. 
- * Then free tx_skbuff and rx_skbuff */ -static void free_skb_resources(struct gfar_private *priv) +static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) { - struct rxbd8 *rxbdp; struct txbd8 *txbdp; + struct gfar_private *priv = netdev_priv(tx_queue->dev); int i, j; - /* Go through all the buffer descriptors and free their data buffers */ - txbdp = priv->tx_bd_base; + txbdp = tx_queue->tx_bd_base; - for (i = 0; i < priv->tx_ring_size; i++) { - if (!priv->tx_skbuff[i]) + for (i = 0; i < tx_queue->tx_ring_size; i++) { + if (!tx_queue->tx_skbuff[i]) continue; dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); txbdp->lstatus = 0; - for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { + for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; + j++) { txbdp++; dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); } txbdp++; - dev_kfree_skb_any(priv->tx_skbuff[i]); - priv->tx_skbuff[i] = NULL; + dev_kfree_skb_any(tx_queue->tx_skbuff[i]); + tx_queue->tx_skbuff[i] = NULL; } + kfree(tx_queue->tx_skbuff); +} - kfree(priv->tx_skbuff); - - rxbdp = priv->rx_bd_base; +static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) +{ + struct rxbd8 *rxbdp; + struct gfar_private *priv = netdev_priv(rx_queue->dev); + int i; - /* rx_skbuff is not guaranteed to be allocated, so only - * free it and its contents if it is allocated */ - if(priv->rx_skbuff != NULL) { - for (i = 0; i < priv->rx_ring_size; i++) { - if (priv->rx_skbuff[i]) { - dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, - priv->rx_buffer_size, - DMA_FROM_DEVICE); + rxbdp = rx_queue->rx_bd_base; - dev_kfree_skb_any(priv->rx_skbuff[i]); - priv->rx_skbuff[i] = NULL; - } + for (i = 0; i < rx_queue->rx_ring_size; i++) { + if (rx_queue->rx_skbuff[i]) { + dma_unmap_single(&priv->ofdev->dev, + rxbdp->bufPtr, priv->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_queue->rx_skbuff[i]); + rx_queue->rx_skbuff[i] = NULL; + } + rxbdp->lstatus = 0; + rxbdp->bufPtr = 0; + rxbdp++; + } + kfree(rx_queue->rx_skbuff); +} - rxbdp->lstatus = 0; - rxbdp->bufPtr = 0; +/* If there are any tx skbs or rx skbs still around, free them. 
+ * Then free tx_skbuff and rx_skbuff */ +static void free_skb_resources(struct gfar_private *priv) +{ + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; - rxbdp++; - } + /* Go through all the buffer descriptors and free their data buffers */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + if(!tx_queue->tx_skbuff) + free_skb_tx_queue(tx_queue); + } - kfree(priv->rx_skbuff); + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + if(!rx_queue->rx_skbuff) + free_skb_rx_queue(rx_queue); } + + dma_free_coherent(&priv->ofdev->dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + priv->tx_queue[0]->tx_bd_base, + priv->tx_queue[0]->tx_bd_dma_base); } void gfar_start(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->regs; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; + int i = 0; /* Enable Rx and Tx in MACCFG1 */ tempval = gfar_read(®s->maccfg1); @@ -905,269 +1637,159 @@ void gfar_start(struct net_device *dev) gfar_write(®s->maccfg1, tempval); /* Initialize DMACTRL to have WWR and WOP */ - tempval = gfar_read(&priv->regs->dmactrl); + tempval = gfar_read(®s->dmactrl); tempval |= DMACTRL_INIT_SETTINGS; - gfar_write(&priv->regs->dmactrl, tempval); + gfar_write(®s->dmactrl, tempval); /* Make sure we aren't stopped */ - tempval = gfar_read(&priv->regs->dmactrl); + tempval = gfar_read(®s->dmactrl); tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); - gfar_write(&priv->regs->dmactrl, tempval); - - /* Clear THLT/RHLT, so that the DMA starts polling now */ - gfar_write(®s->tstat, TSTAT_CLEAR_THALT); - gfar_write(®s->rstat, RSTAT_CLEAR_RHALT); - - /* Unmask the interrupts we look for */ - gfar_write(®s->imask, IMASK_DEFAULT); + gfar_write(®s->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear THLT/RHLT, so that the DMA starts polling now */ + gfar_write(®s->tstat, priv->gfargrp[i].tstat); + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + /* Unmask the interrupts we look for */ + gfar_write(®s->imask, IMASK_DEFAULT); + } dev->trans_start = jiffies; } -/* Bring the controller up and running */ -int startup_gfar(struct net_device *dev) +void gfar_configure_coalescing(struct gfar_private *priv, + unsigned long tx_mask, unsigned long rx_mask) { - struct txbd8 *txbdp; - struct rxbd8 *rxbdp; - dma_addr_t addr = 0; - unsigned long vaddr; - int i; - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->regs; - int err = 0; - u32 rctrl = 0; - u32 tctrl = 0; - u32 attrs = 0; - - gfar_write(®s->imask, IMASK_INIT_CLEAR); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i = 0; - /* Allocate memory for the buffer descriptors */ - vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, - sizeof (struct txbd8) * priv->tx_ring_size + - sizeof (struct rxbd8) * priv->rx_ring_size, - &addr, GFP_KERNEL); - - if (vaddr == 0) { - if (netif_msg_ifup(priv)) - printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", - dev->name); - return -ENOMEM; - } - - priv->tx_bd_base = (struct txbd8 *) vaddr; - - /* enet DMA only understands physical addresses */ - gfar_write(®s->tbase0, addr); - - /* Start the rx descriptor ring where the tx ring leaves off */ - addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; - vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size; - priv->rx_bd_base = 
(struct rxbd8 *) vaddr; - gfar_write(®s->rbase0, addr); - - /* Setup the skbuff rings */ - priv->tx_skbuff = - (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * - priv->tx_ring_size, GFP_KERNEL); - - if (NULL == priv->tx_skbuff) { - if (netif_msg_ifup(priv)) - printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", - dev->name); - err = -ENOMEM; - goto tx_skb_fail; - } - - for (i = 0; i < priv->tx_ring_size; i++) - priv->tx_skbuff[i] = NULL; - - priv->rx_skbuff = - (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * - priv->rx_ring_size, GFP_KERNEL); - - if (NULL == priv->rx_skbuff) { - if (netif_msg_ifup(priv)) - printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", - dev->name); - err = -ENOMEM; - goto rx_skb_fail; - } - - for (i = 0; i < priv->rx_ring_size; i++) - priv->rx_skbuff[i] = NULL; - - /* Initialize some variables in our dev structure */ - priv->num_txbdfree = priv->tx_ring_size; - priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; - priv->cur_rx = priv->rx_bd_base; - priv->skb_curtx = priv->skb_dirtytx = 0; - priv->skb_currx = 0; - - /* Initialize Transmit Descriptor Ring */ - txbdp = priv->tx_bd_base; - for (i = 0; i < priv->tx_ring_size; i++) { - txbdp->lstatus = 0; - txbdp->bufPtr = 0; - txbdp++; - } - - /* Set the last descriptor in the ring to indicate wrap */ - txbdp--; - txbdp->status |= TXBD_WRAP; - - rxbdp = priv->rx_bd_base; - for (i = 0; i < priv->rx_ring_size; i++) { - struct sk_buff *skb; - - skb = gfar_new_skb(dev); - - if (!skb) { - printk(KERN_ERR "%s: Can't allocate RX buffers\n", - dev->name); + /* Backward compatible case ---- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(®s->txic, 0); + if(likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(®s->txic, priv->tx_queue[0]->txic); - goto err_rxalloc_fail; + gfar_write(®s->rxic, 0); + if(unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(®s->rxic, priv->rx_queue[0]->rxic); + + if (priv->mode == MQ_MG_MODE) { + baddr = ®s->txic0; + for_each_bit (i, &tx_mask, priv->num_tx_queues) { + if (likely(priv->tx_queue[i]->txcoalescing)) { + gfar_write(baddr + i, 0); + gfar_write(baddr + i, priv->tx_queue[i]->txic); + } } - priv->rx_skbuff[i] = skb; - - gfar_new_rxbdp(dev, rxbdp, skb); - - rxbdp++; + baddr = ®s->rxic0; + for_each_bit (i, &rx_mask, priv->num_rx_queues) { + if (likely(priv->rx_queue[i]->rxcoalescing)) { + gfar_write(baddr + i, 0); + gfar_write(baddr + i, priv->rx_queue[i]->rxic); + } + } } +} - /* Set the last descriptor in the ring to wrap */ - rxbdp--; - rxbdp->status |= RXBD_WRAP; +static int register_grp_irqs(struct gfar_priv_grp *grp) +{ + struct gfar_private *priv = grp->priv; + struct net_device *dev = priv->ndev; + int err; /* If the device has multiple interrupts, register for * them. 
Otherwise, only register for the one */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { /* Install our interrupt handlers for Error, * Transmit, and Receive */ - if (request_irq(priv->interruptError, gfar_error, - 0, priv->int_name_er, dev) < 0) { + if ((err = request_irq(grp->interruptError, gfar_error, 0, + grp->int_name_er,grp)) < 0) { if (netif_msg_intr(priv)) printk(KERN_ERR "%s: Can't get IRQ %d\n", - dev->name, priv->interruptError); + dev->name, grp->interruptError); - err = -1; - goto err_irq_fail; + goto err_irq_fail; } - if (request_irq(priv->interruptTransmit, gfar_transmit, - 0, priv->int_name_tx, dev) < 0) { + if ((err = request_irq(grp->interruptTransmit, gfar_transmit, + 0, grp->int_name_tx, grp)) < 0) { if (netif_msg_intr(priv)) printk(KERN_ERR "%s: Can't get IRQ %d\n", - dev->name, priv->interruptTransmit); - - err = -1; - + dev->name, grp->interruptTransmit); goto tx_irq_fail; } - if (request_irq(priv->interruptReceive, gfar_receive, - 0, priv->int_name_rx, dev) < 0) { + if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, + grp->int_name_rx, grp)) < 0) { if (netif_msg_intr(priv)) - printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", - dev->name, priv->interruptReceive); - - err = -1; + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, grp->interruptReceive); goto rx_irq_fail; } } else { - if (request_irq(priv->interruptTransmit, gfar_interrupt, - 0, priv->int_name_tx, dev) < 0) { + if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, + grp->int_name_tx, grp)) < 0) { if (netif_msg_intr(priv)) printk(KERN_ERR "%s: Can't get IRQ %d\n", - dev->name, priv->interruptTransmit); - - err = -1; + dev->name, grp->interruptTransmit); goto err_irq_fail; } } - phy_start(priv->phydev); - - /* Configure the coalescing support */ - gfar_write(®s->txic, 0); - if (priv->txcoalescing) - gfar_write(®s->txic, priv->txic); - - gfar_write(®s->rxic, 0); - if (priv->rxcoalescing) - gfar_write(®s->rxic, priv->rxic); - - if (priv->rx_csum_enable) - rctrl |= RCTRL_CHECKSUMMING; + return 0; - if (priv->extended_hash) { - rctrl |= RCTRL_EXTHASH; +rx_irq_fail: + free_irq(grp->interruptTransmit, grp); +tx_irq_fail: + free_irq(grp->interruptError, grp); +err_irq_fail: + return err; - gfar_clear_exact_match(dev); - rctrl |= RCTRL_EMEN; - } +} - if (priv->padding) { - rctrl &= ~RCTRL_PAL_MASK; - rctrl |= RCTRL_PADDING(priv->padding); - } +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = NULL; + int err, i, j; - /* keep vlan related bits if it's enabled */ - if (priv->vlgrp) { - rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; - tctrl |= TCTRL_VLINS; + for (i = 0; i < priv->num_grps; i++) { + regs= priv->gfargrp[i].regs; + gfar_write(®s->imask, IMASK_INIT_CLEAR); } - /* Init rctrl based on our settings */ - gfar_write(&priv->regs->rctrl, rctrl); - - if (dev->features & NETIF_F_IP_CSUM) - tctrl |= TCTRL_INIT_CSUM; - - gfar_write(&priv->regs->tctrl, tctrl); - - /* Set the extraction length and index */ - attrs = ATTRELI_EL(priv->rx_stash_size) | - ATTRELI_EI(priv->rx_stash_index); - - gfar_write(&priv->regs->attreli, attrs); - - /* Start with defaults, and add stashing or locking - * depending on the approprate variables */ - attrs = ATTR_INIT_SETTINGS; + regs= priv->gfargrp[0].regs; + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; - if (priv->bd_stash_en) - attrs |= ATTR_BDSTASH; + gfar_init_mac(ndev); - if (priv->rx_stash_size != 0) - attrs |= 
ATTR_BUFSTASH; + for (i = 0; i < priv->num_grps; i++) { + err = register_grp_irqs(&priv->gfargrp[i]); + if (err) { + for (j = 0; j < i; j++) + free_grp_irqs(&priv->gfargrp[j]); + goto irq_fail; + } + } - gfar_write(&priv->regs->attr, attrs); + /* Start the controller */ + gfar_start(ndev); - gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); - gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve); - gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); + phy_start(priv->phydev); - /* Start the controller */ - gfar_start(dev); + gfar_configure_coalescing(priv, 0xFF, 0xFF); return 0; -rx_irq_fail: - free_irq(priv->interruptTransmit, dev); -tx_irq_fail: - free_irq(priv->interruptError, dev); -err_irq_fail: -err_rxalloc_fail: -rx_skb_fail: +irq_fail: free_skb_resources(priv); -tx_skb_fail: - dma_free_coherent(&priv->ofdev->dev, - sizeof(struct txbd8)*priv->tx_ring_size - + sizeof(struct rxbd8)*priv->rx_ring_size, - priv->tx_bd_base, - gfar_read(®s->tbase0)); - return err; } @@ -1178,7 +1800,7 @@ static int gfar_enet_open(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); int err; - napi_enable(&priv->napi); + enable_napi(priv); skb_queue_head_init(&priv->rx_recycle); @@ -1189,18 +1811,18 @@ static int gfar_enet_open(struct net_device *dev) err = init_phy(dev); - if(err) { - napi_disable(&priv->napi); + if (err) { + disable_napi(priv); return err; } err = startup_gfar(dev); if (err) { - napi_disable(&priv->napi); + disable_napi(priv); return err; } - netif_start_queue(dev); + netif_tx_start_all_queues(dev); device_set_wakeup_enable(&dev->dev, priv->wol_en); @@ -1269,15 +1891,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct netdev_queue *txq; + struct gfar __iomem *regs = NULL; struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base; u32 lstatus; - int i; + int i, rq = 0; u32 bufaddr; unsigned long flags; unsigned int nr_frags, length; - base = priv->tx_bd_base; + + rq = skb->queue_mapping; + tx_queue = priv->tx_queue[rq]; + txq = netdev_get_tx_queue(dev, rq); + base = tx_queue->tx_bd_base; + regs = tx_queue->grp->regs; /* make space for additional header when fcb is needed */ if (((skb->ip_summed == CHECKSUM_PARTIAL) || @@ -1298,21 +1928,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* total number of fragments in the SKB */ nr_frags = skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&priv->txlock, flags); - /* check if there is space to queue this packet */ - if ((nr_frags+1) > priv->num_txbdfree) { + if ((nr_frags+1) > tx_queue->num_txbdfree) { /* no space, stop the queue */ - netif_stop_queue(dev); + netif_tx_stop_queue(txq); dev->stats.tx_fifo_errors++; - spin_unlock_irqrestore(&priv->txlock, flags); return NETDEV_TX_BUSY; } /* Update transmit stats */ dev->stats.tx_bytes += skb->len; - txbdp = txbdp_start = priv->cur_tx; + txbdp = txbdp_start = tx_queue->cur_tx; if (nr_frags == 0) { lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); @@ -1320,7 +1947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { /* Point at the next BD, wrapping as needed */ - txbdp = next_txbd(txbdp, base, priv->tx_ring_size); + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); length = 
skb_shinfo(skb)->frags[i].size; @@ -1362,13 +1989,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* setup the TxBD length and buffer pointer for the first BD */ - priv->tx_skbuff[priv->skb_curtx] = skb; + tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); /* + * We can work in parallel with gfar_clean_tx_ring(), except + * when modifying num_txbdfree. Note that we didn't grab the lock + * when we were reading the num_txbdfree and checking for available + * space, that's because outside of this function it can only grow, + * and once we've got needed space, it cannot suddenly disappear. + * + * The lock also protects us from gfar_error(), which can modify + * regs->tstat and thus retrigger the transfers, which is why we + * also must grab the lock before setting ready bit for the first + * to be transmitted BD. + */ + spin_lock_irqsave(&tx_queue->txlock, flags); + + /* * The powerpc-specific eieio() is used, as wmb() has too strong * semantics (it requires synchronization between cacheable and * uncacheable mappings, which eieio doesn't provide and which we @@ -1382,29 +2023,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Update the current skb pointer to the next entry we will use * (wrapping if necessary) */ - priv->skb_curtx = (priv->skb_curtx + 1) & - TX_RING_MOD_MASK(priv->tx_ring_size); + tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & + TX_RING_MOD_MASK(tx_queue->tx_ring_size); - priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); + tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); /* reduce TxBD free count */ - priv->num_txbdfree -= (nr_frags + 1); + tx_queue->num_txbdfree -= (nr_frags + 1); dev->trans_start = jiffies; /* If the next BD still needs to be cleaned up, then the bds are full. We need to tell the kernel to stop sending us stuff. 
@@ -1414,7 +2055,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	disable_napi(priv);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1424,7 +2065,7 @@ static int gfar_close(struct net_device *dev)
 	phy_disconnect(priv->phydev);
 	priv->phydev = NULL;
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	return 0;
 }
 
@@ -1443,50 +2084,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	regs = priv->gfargrp[0].regs;
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	priv->vlgrp = grp;
 
 	if (grp) {
 		/* Enable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval |= TCTRL_VLINS;
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Enable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	} else {
 		/* Disable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval &= ~TCTRL_VLINS;
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Disable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval &= ~RCTRL_VLEX;
 		/* If parse is no longer required, then disable parser */
 		if (tempval & RCTRL_REQ_PARSER)
 			tempval |= RCTRL_PRSDEP_INIT;
 		else
 			tempval &= ~RCTRL_PRSDEP_INIT;
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	}
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int tempsize, tempval;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int oldsize = priv->rx_buffer_size;
 	int frame_size = new_mtu + ETH_HLEN;
 
@@ -1518,20 +2164,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	dev->mtu = new_mtu;
 
-	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
-	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 
 	/* If the mtu is larger than the max size for standard
 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
 	 * to allow huge frames, and to check the length */
-	tempval = gfar_read(&priv->regs->maccfg2);
+	tempval = gfar_read(&regs->maccfg2);
 
 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 	else
 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 
-	gfar_write(&priv->regs->maccfg2, tempval);
+	gfar_write(&regs->maccfg2, tempval);
 
 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
 		startup_gfar(dev);
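The maccfg2 logic above boils down to one comparison: once the buffer needed for the new MTU exceeds the default buffer size, huge-frame mode and length checking are switched on together. A standalone illustration of that arithmetic (the constants are assumed here for the example; the real values live in gianfar.h):

#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header */
#define DEFAULT_RX_BUFFER_SIZE	1536	/* assumed default, per gianfar.h */

int main(void)
{
	int mtus[] = { 1500, 9000 };

	for (int i = 0; i < 2; i++) {
		int frame_size = mtus[i] + ETH_HLEN;

		/* mirrors the MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK test */
		printf("mtu %d -> huge frames %s\n", mtus[i],
		       frame_size > DEFAULT_RX_BUFFER_SIZE ? "on" : "off");
	}
	return 0;
}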
@@ -1551,10 +2197,10 @@ static void gfar_reset_task(struct work_struct *work)
 	struct net_device *dev = priv->ndev;
 
 	if (dev->flags & IFF_UP) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 		stop_gfar(dev);
 		startup_gfar(dev);
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);
@@ -1569,24 +2215,29 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue[tx_queue->qindex];
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
+
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+		unsigned long flags;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
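skb_dirtytx above is a plain array index, and the cleanup loop that follows advances it with "& TX_RING_MOD_MASK(size)". That trick replaces a modulo with a mask and therefore assumes a power-of-two ring size; a standalone illustration:

#include <stdio.h>

#define RING_MOD_MASK(size)	((size) - 1)	/* valid only for size == 2^n */

int main(void)
{
	int ring_size = 8;

	for (int idx = 6; idx < 10; idx++)
		printf("%d -> %d\n", idx, (idx + 1) & RING_MOD_MASK(ring_size));
	/* 6 -> 7, 7 -> 0, 8 -> 1, 9 -> 2: the index wraps without a division */
	return 0;
}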
@@ -1618,82 +2269,73 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		tx_queue->num_txbdfree += frags + 1;
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
-		netif_wake_queue(dev);
+	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
+		netif_wake_subqueue(dev, tx_queue->qindex);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
 	return howmany;
 }
 
-static void gfar_schedule_cleanup(struct net_device *dev)
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
 {
-	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
-
-	if (napi_schedule_prep(&priv->napi)) {
-		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+	spin_lock_irqsave(&gfargrp->grplock, flags);
+	if (napi_schedule_prep(&gfargrp->napi)) {
+		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+		__napi_schedule(&gfargrp->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
 		 * because of the packets that have already arrived.
 		 */
-		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
 	}
+	spin_unlock_irqrestore(&gfargrp->grplock, flags);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 lstatus;
-
-	bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
-			priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
-		lstatus |= BD_LFLAG(RXBD_WRAP);
-
-	eieio();
+	dma_addr_t buf;
 
-	bdp->lstatus = lstatus;
+	buf = dma_map_single(&priv->ofdev->dev, skb->data,
+			priv->rx_buffer_size, DMA_FROM_DEVICE);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
@@ -1760,9 +2402,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 	}
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id)
+irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
@@ -1792,6 +2434,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 		fcb = (struct rxfcb *)skb->data;
 
 	/* Remove the FCB from the skb */
+	skb_set_queue_mapping(skb, fcb->rq);
 	/* Remove the padded bytes, if there are any */
 	if (amount_pull)
 		skb_pull(skb, amount_pull);
@@ -1818,8 +2461,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  *   until the budget/quota has been reached. Returns the number
  *   of frames handled */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1828,8 +2472,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
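amount_pull computed above is the number of bytes the hardware prepended in front of the Ethernet frame: a frame control block (FCB) when one is in use, plus any configured padding. The FCB is read first (gfar_process_frame() above picks the hardware rx queue out of fcb->rq), then stripped. A reduced sketch of that strip, assuming the 8-byte FCB size from gianfar.h:

#include <linux/skbuff.h>

#define FCB_LEN_SKETCH	8	/* assumed: GMAC_FCB_LEN in gianfar.h */

/* Drop the hardware-prepended FCB and padding so skb->data points at
 * the Ethernet header before the frame goes up the stack. */
static void strip_hw_prefix(struct sk_buff *skb, int padding)
{
	skb_pull(skb, FCB_LEN_SKETCH + padding);
}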
@@ -1841,7 +2485,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1875,8 +2519,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 				skb_put(skb, pkt_len);
 				dev->stats.rx_bytes += pkt_len;
 
-				if (in_irq() || irqs_disabled())
-					printk("Interrupt problem!\n");
 				gfar_process_frame(dev, skb, amount_pull);
 
 			} else {
@@ -1889,46 +2531,70 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-			(priv->skb_currx + 1) &
-			RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+			(rx_queue->skb_currx + 1) &
+			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
-	int tx_cleaned = 0;
-	int rx_cleaned = 0;
-	unsigned long flags;
+	struct gfar_priv_grp *gfargrp = container_of(napi,
+			struct gfar_priv_grp, napi);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
+	int tx_cleaned = 0, i, left_over_budget = budget;
+	unsigned long serviced_queues = 0;
+	int num_queues = 0;
+
+	num_queues = gfargrp->num_rx_queues;
+	budget_per_queue = budget/num_queues;
 
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
-	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
-	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+
+	while (num_queues && left_over_budget) {
+
+		budget_per_queue = left_over_budget/num_queues;
+		left_over_budget = 0;
+
+		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+			if (test_bit(i, &serviced_queues))
+				continue;
+			rx_queue = priv->rx_queue[i];
+			tx_queue = priv->tx_queue[rx_queue->qindex];
+
+			tx_cleaned += gfar_clean_tx_ring(tx_queue);
+			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
+							budget_per_queue);
+			rx_cleaned += rx_cleaned_per_queue;
+			if (rx_cleaned_per_queue < budget_per_queue) {
+				left_over_budget = left_over_budget +
+					(budget_per_queue -
+						rx_cleaned_per_queue);
+				set_bit(i, &serviced_queues);
+				num_queues--;
+			}
+		}
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
-
 	if (tx_cleaned)
 		return budget;
 
@@ -1936,20 +2602,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 
 		/* Clear the halt bit in RSTAT */
-		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+		gfar_write(&regs->rstat, gfargrp->rstat);
 
-		gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+		gfar_write(&regs->imask, IMASK_DEFAULT);
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
-			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
-		}
-		if (likely(priv->txcoalescing)) {
-			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
-		}
+		gfar_configure_coalescing(priv,
+				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
 	}
 
 	return rx_cleaned;
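The new gfar_poll() above no longer spends the whole NAPI budget on a single ring: each pass of the while loop divides the remaining budget evenly over the not-yet-finished queues in the group, and any queue that comes in under its share donates the remainder to the next pass. A standalone model of that redistribution, with hypothetical per-queue backlogs:

#include <stdio.h>

int main(void)
{
	int work[3] = { 2, 10, 10 };	/* packets pending per queue */
	int serviced[3] = { 0, 0, 0 };
	int budget = 24, left_over = budget, num_queues = 3;

	while (num_queues && left_over) {
		int per_queue = left_over / num_queues;

		left_over = 0;
		for (int i = 0; i < 3; i++) {
			if (serviced[i])
				continue;
			/* a queue can use at most its share of the budget */
			int cleaned = work[i] < per_queue ? work[i] : per_queue;

			work[i] -= cleaned;
			if (cleaned < per_queue) {
				/* finished early: donate the rest and retire */
				left_over += per_queue - cleaned;
				serviced[i] = 1;
				num_queues--;
			}
		}
	}
	printf("left: %d %d %d\n", work[0], work[1], work[2]);	/* 0 0 0 */
	return 0;
}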
@@ -1964,44 +2624,49 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 static void gfar_netpoll(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	/* If the device has multiple interrupts, run tx/rx */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		disable_irq(priv->interruptTransmit);
-		disable_irq(priv->interruptReceive);
-		disable_irq(priv->interruptError);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptError);
-		enable_irq(priv->interruptReceive);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			disable_irq(priv->gfargrp[i].interruptReceive);
+			disable_irq(priv->gfargrp[i].interruptError);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptError);
+			enable_irq(priv->gfargrp[i].interruptReceive);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	} else {
-		disable_irq(priv->interruptTransmit);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	}
 }
 #endif
 
 /* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *dev_id)
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&gfargrp->regs->ievent);
 
 	/* Check for reception */
 	if (events & IEVENT_RX_MASK)
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 	/* Check for transmit completion */
 	if (events & IEVENT_TX_MASK)
-		gfar_transmit(irq, dev_id);
+		gfar_transmit(irq, grp_id);
 
 	/* Check for errors */
 	if (events & IEVENT_ERR_MASK)
-		gfar_error(irq, dev_id);
+		gfar_error(irq, grp_id);
 
 	return IRQ_HANDLED;
 }
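gfar_interrupt() above is the single-vector case: one read of IEVENT, then a fan-out to the receive, transmit, and error paths, now keyed by the interrupt group instead of the net_device. A standalone sketch of that dispatch shape (the bit values are invented for illustration, not the controller's):

#include <stdio.h>

#define EV_RX	(1u << 0)
#define EV_TX	(1u << 1)
#define EV_ERR	(1u << 2)

/* Read the latched events once, then handle each cause that is set. */
static void dispatch(unsigned int events)
{
	if (events & EV_RX)
		printf("rx work\n");	/* would call gfar_receive() */
	if (events & EV_TX)
		printf("tx work\n");	/* would call gfar_transmit() */
	if (events & EV_ERR)
		printf("error\n");	/* would call gfar_error() */
}

int main(void)
{
	dispatch(EV_RX | EV_ERR);	/* prints "rx work" and "error" */
	return 0;
}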
@@ -2015,12 +2680,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2752,8 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
-
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2097,10 +2764,10 @@ static void gfar_set_multi(struct net_device *dev)
 {
 	struct dev_mc_list *mc_ptr;
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2112,7 +2779,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,7 +2831,7 @@ static void gfar_set_multi(struct net_device *dev)
 			em_num = 0;
 		}
 
-		if(dev->mc_count == 0)
+		if (dev->mc_count == 0)
 			return;
 
 		/* Parse the list, and set the appropriate bits */
@@ -2230,10 +2897,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int idx;
 	char tmpbuf[MAC_ADDR_LEN];
 	u32 tempval;
-	u32 __iomem *macptr = &priv->regs->macstnaddr1;
+	u32 __iomem *macptr = &regs->macstnaddr1;
 
 	macptr += num*2;
 
@@ -2250,16 +2918,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 }
 
 /* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *dev_id)
+static irqreturn_t gfar_error(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_private *priv = gfargrp->priv;
+	struct net_device *dev = priv->ndev;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&regs->ievent);
 
 	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
+	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
 
 	/* Magic Packet is not an error. */
 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2939,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 	/* Hmm... */
 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
 		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-		       dev->name, events, gfar_read(&priv->regs->imask));
+		       dev->name, events, gfar_read(&regs->imask));
 
 	/* Update the error counters */
 	if (events & IEVENT_TXE) {
@@ -2280,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 		if (events & IEVENT_CRL)
 			dev->stats.tx_aborted_errors++;
 		if (events & IEVENT_XFUN) {
+			unsigned long flags;
+
 			if (netif_msg_tx_err(priv))
 				printk(KERN_DEBUG "%s: TX FIFO underrun, "
 				       "packet dropped.\n", dev->name);
 			dev->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;
 
+			local_irq_save(flags);
+			lock_tx_qs(priv);
+
 			/* Reactivate the Tx Queues */
-			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+			gfar_write(&regs->tstat, gfargrp->tstat);
+
+			unlock_tx_qs(priv);
+			local_irq_restore(flags);
 		}
 		if (netif_msg_tx_err(priv))
 			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +2974,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
 		dev->stats.rx_errors++;
 		priv->extra_stats.rx_bsy++;
 
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 		if (netif_msg_rx_err(priv))
 			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-			       dev->name, gfar_read(&priv->regs->rstat));
+			       dev->name, gfar_read(&regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		dev->stats.rx_errors++;
@@ -2331,6 +3009,9 @@ static struct of_device_id gfar_match[] =
 		.type = "network",
 		.compatible = "gianfar",
 	},
+	{
+		.compatible = "fsl,etsec2",
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, gfar_match);
 
@@ -2342,8 +3023,9 @@ static struct of_platform_driver gfar_driver = {
 	.probe = gfar_probe,
 	.remove = gfar_remove,
-	.suspend = gfar_suspend,
-	.resume = gfar_resume,
+	.suspend = gfar_legacy_suspend,
+	.resume = gfar_legacy_resume,
+	.driver.pm = GFAR_PM_OPS,
 };
 
 static int __init gfar_init(void)
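The gfar_match[] addition above is what lets one driver serve both bindings: an of_device_id table is matched entry by entry, so the old "gianfar" device-tree nodes and the new "fsl,etsec2" nodes both bind here. A reduced sketch of such a table (illustrative only, not the driver's full file):

#include <linux/mod_devicetable.h>

static const struct of_device_id match_sketch[] = {
	{ .type = "network", .compatible = "gianfar" },	/* legacy binding */
	{ .compatible = "fsl,etsec2" },			/* eTSEC 2.0 binding */
	{ /* sentinel: matching stops at the empty entry */ },
};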