Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r-- | drivers/net/qlge/qlge_main.c | 94
1 file changed, 65 insertions(+), 29 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3d1d7b6e55aa..91191f761fba 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					lbq_desc->index);
 				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 				if (lbq_desc->p.lbq_page == NULL) {
+					rx_ring->lbq_clean_idx = clean_idx;
 					QPRINTK(qdev, RX_STATUS, ERR,
 						"Couldn't get a page.\n");
 					return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					 0, PAGE_SIZE,
 					 PCI_DMA_FROMDEVICE);
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
+				rx_ring->lbq_clean_idx = clean_idx;
+				put_page(lbq_desc->p.lbq_page);
+				lbq_desc->p.lbq_page = NULL;
 				QPRINTK(qdev, RX_STATUS, ERR,
 					"PCI mapping failed.\n");
 				return;
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			if (pci_dma_mapping_error(qdev->pdev, map)) {
 				QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 				rx_ring->sbq_clean_idx = clean_idx;
+				dev_kfree_skb_any(sbq_desc->p.skb);
+				sbq_desc->p.skb = NULL;
 				return;
 			}
 			pci_unmap_addr_set(sbq_desc, mapaddr, map);
@@ -1430,18 +1436,32 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
 	}
-	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
-		QPRINTK(qdev, RX_STATUS, ERR,
-			"Bad checksum for this %s packet.\n",
-			((ib_mac_rsp->
-			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
-		skb->ip_summed = CHECKSUM_NONE;
-	} else if (qdev->rx_csum &&
-		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
-		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
-		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if (qdev->rx_csum &&
+		!(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+					"TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+						"TCP checksum done!\n");
+			}
+		}
 	}
 	qdev->stats.rx_packets++;
 	qdev->stats.rx_bytes += skb->len;
@@ -1449,12 +1469,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a VLAN packet upstream.\n");
-		vlan_hwaccel_rx(skb, qdev->vlgrp,
+		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
 				le16_to_cpu(ib_mac_rsp->vlan_id));
 	} else {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"Passing a normal packet upstream.\n");
-		netif_rx(skb);
+		netif_receive_skb(skb);
 	}
 }
 
@@ -1511,6 +1531,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 	netif_stop_queue(qdev->ndev);
 	netif_carrier_off(qdev->ndev);
 	ql_disable_interrupts(qdev);
+	/* Clear adapter up bit to signal the recovery
+	 * process that it shouldn't kill the reset worker
+	 * thread
+	 */
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
@@ -1916,6 +1941,9 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 
 	tx_ring = &qdev->tx_ring[tx_ring_idx];
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
 		QPRINTK(qdev, TX_QUEUED, INFO,
 			"%s: shutting down tx queue %d du to lack of resources.\n",
@@ -1927,10 +1955,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 	mac_iocb_ptr = tx_ring_desc->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-		return NETDEV_TX_BUSY;
-	}
 
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 	mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1956,6 +1980,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 		ql_hw_csum_setup(skb,
 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
 	}
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+			NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+				"Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
 	tx_ring->prod_idx++;
 	if (tx_ring->prod_idx == tx_ring->wq_len)
@@ -2873,8 +2903,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	/*
 	 * Fill out the Indirection Table.
 	 */
-	for (i = 0; i < 32; i++)
-		hash_id[i] = i & 1;
+	for (i = 0; i < 256; i++)
+		hash_id[i] = i & (qdev->rss_ring_count - 1);
 
 	/*
 	 * Random values for the IPv6 and IPv4 Hash Keys.
@@ -2957,9 +2987,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 	mask = value << 16;
 	ql_write32(qdev, SYS, mask | value);
 
-	/* Set the default queue. */
-	value = NIC_RCV_CFG_DFQ;
-	mask = NIC_RCV_CFG_DFQ_MASK;
+	/* Set the default queue, and VLAN behavior. */
+	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
 	/* Set the MPI interrupt to enabled. */
@@ -3100,7 +3130,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	cancel_delayed_work_sync(&qdev->asic_reset_work);
+	/* Don't kill the reset worker thread if we
+	 * are in the process of recovery.
+	 */
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_work);
 
@@ -3132,6 +3166,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
 	ql_tx_ring_clean(qdev);
 
+	/* Call netif_napi_del() from common point.
+	 */
+	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+		netif_napi_del(&qdev->rx_ring[i].napi);
+
 	spin_lock(&qdev->hw_lock);
 	status = ql_adapter_reset(qdev);
 	if (status)
@@ -3501,7 +3540,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 static void qlge_tx_timeout(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+	ql_queue_asic_error(qdev);
 }
 
 static void ql_asic_reset_work(struct work_struct *work)
@@ -3836,7 +3875,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	int err, i;
+	int err;
 
 	netif_device_detach(ndev);
 
@@ -3846,9 +3885,6 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 		return err;
 	}
 
-	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
-		netif_napi_del(&qdev->rx_ring[i].napi);
-
 	err = pci_save_state(pdev);
 	if (err)
 		return err;
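The lbq and sbq hunks above share one error-handling pattern: when alloc_page() or the PCI mapping fails mid-refill, the code now rolls lbq_clean_idx/sbq_clean_idx back to the last fully provisioned slot and releases the half-initialized buffer, so nothing is leaked and the next refill pass retries the same slot. A minimal user-space sketch of that unwind, with illustrative names (refill_one, dma_map_stub) that are not the driver's:

    #include <stdlib.h>

    struct ring_entry { void *buf; };
    struct ring { struct ring_entry *e; unsigned int clean_idx; };

    /* Stand-in for pci_map_page(); pretend mapping always succeeds. */
    static int dma_map_stub(void *buf) { (void)buf; return 0; }

    static int refill_one(struct ring *r, unsigned int idx,
                          unsigned int saved_clean_idx)
    {
            struct ring_entry *ent = &r->e[idx];

            ent->buf = malloc(4096);        /* stands in for alloc_page() */
            if (!ent->buf)
                    goto unwind;

            if (dma_map_stub(ent->buf) != 0) {
                    free(ent->buf);         /* release the half-set-up buffer */
                    ent->buf = NULL;        /* don't leave a stale pointer behind */
                    goto unwind;
            }
            return 0;

    unwind:
            r->clean_idx = saved_clean_idx; /* next refill pass retries this slot */
            return -1;
    }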
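The new receive-checksum path in ql_process_mac_rx_intr() only trusts the hardware UDP checksum for unfragmented IPv4 datagrams, which is why it tests frag_off against IP_MF|IP_OFFSET: a datagram is unfragmented exactly when the more-fragments flag is clear and the fragment offset is zero, so one mask-and-test covers both. A small user-space sketch of the test, with htons() in place of cpu_to_be16():

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>      /* htons() */

    #define IP_OFFSET 0x1FFF    /* 13-bit fragment offset in frag_off */
    #define IP_MF     0x2000    /* "more fragments" flag */

    /* True when the IPv4 header says this datagram is whole: MF clear
     * and offset zero. frag_off is big-endian on the wire, so the
     * constant mask is byte-swapped once instead of swapping the
     * field for every packet. */
    static bool ipv4_unfragmented(uint16_t frag_off_be)
    {
            return !(frag_off_be & htons(IP_MF | IP_OFFSET));
    }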
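The ql_start_rss() change fills all 256 indirection-table entries and stripes them across the RSS rings with i & (rss_ring_count - 1), which equals i % rss_ring_count only when the ring count is a power of two (the old i & 1 two-ring fill made the same assumption). A standalone sketch of the fill under that power-of-two assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Stripe 256 indirection entries across `rings` RSS queues.
     * `& (rings - 1)` is modulo only for power-of-two ring counts. */
    static void fill_indirection_table(uint8_t table[256], unsigned int rings)
    {
            for (unsigned int i = 0; i < 256; i++)
                    table[i] = i & (rings - 1);
    }

    int main(void)
    {
            uint8_t table[256];

            fill_indirection_table(table, 4);       /* e.g. four RSS rings */
            printf("%u %u %u %u %u\n", table[0], table[1],
                   table[2], table[3], table[4]);   /* prints: 0 1 2 3 0 */
            return 0;
    }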
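qlge_send() now calls skb_padto(skb, ETH_ZLEN) before touching the ring, so frames shorter than the Ethernet minimum are zero-padded before they are DMA-mapped; on allocation failure skb_padto() has already freed the skb, hence the bare NETDEV_TX_OK return. A plain-buffer sketch of the guarantee, not the skb API:

    #include <string.h>

    #define ETH_ZLEN 60 /* minimum Ethernet frame length, without FCS */

    /* Pad a runt frame up to ETH_ZLEN with zero bytes and return the
     * new length; frames already long enough pass through untouched.
     * `cap` is the buffer capacity, a detail skb_padto() handles by
     * expanding the skb instead. */
    static unsigned int pad_runt(unsigned char *buf, unsigned int len,
                                 unsigned int cap)
    {
            if (len >= ETH_ZLEN)
                    return len;             /* nothing to pad */
            if (cap < ETH_ZLEN)
                    return 0;               /* no room; caller must drop */
            memset(buf + len, 0, ETH_ZLEN - len);
            return ETH_ZLEN;
    }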