Diffstat (limited to 'drivers/net/ixgbevf/ixgbevf_main.c')
-rw-r--r-- | drivers/net/ixgbevf/ixgbevf_main.c | 125
1 files changed, 57 insertions, 68 deletions
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..a16cff7e54a3 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 				bi->page_offset ^= (PAGE_SIZE / 2);
 			}
 
-			bi->page_dma = pci_map_page(pdev, bi->page,
+			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 						    bi->page_offset,
 						    (PAGE_SIZE / 2),
-						    PCI_DMA_FROMDEVICE);
+						    DMA_FROM_DEVICE);
 		}
 
 		skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->skb = skb;
 		}
 		if (!bi->dma) {
-			bi->dma = pci_map_single(pdev, skb->data,
+			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		 * packets not getting split correctly
 		 */
 		if (staterr & IXGBE_RXD_STAT_LB) {
-			u32 header_fixup_len = skb->len - skb->data_len;
+			u32 header_fixup_len = skb_headlen(skb);
 			if (header_fixup_len < 14)
 				skb_push(skb, header_fixup_len);
 		}
 		skb->protocol = eth_type_trans(skb, adapter->netdev);
 
 		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
-		adapter->netdev->last_rx = jiffies;
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
 		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
 	}
-
-	return;
 }
 
 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
 
+	if (!hw->mbx.ops.check_for_ack(hw)) {
+		/*
+		 * checking for the ack clears the PFACK bit.  Place
+		 * it back in the v2p_mailbox cache so that anyone
+		 * polling for an ack will not miss it.  Also
+		 * avoid the read below because the code to read
+		 * the mailbox will also clear the ack bit.  This was
+		 * causing lost acks.  Just cache the bit and exit
+		 * the IRQ handler.
+		 */
+		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+		goto out;
+	}
+
+	/* Not an ack interrupt, go ahead and read the message */
 	hw->mbx.ops.read(hw, &msg, 1);
 
 	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
 		mod_timer(&adapter->watchdog_timer,
 			  round_jiffies(jiffies + 1));
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
 	}
 }
 
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-				 u32 *vmdq)
-{
-	struct dev_mc_list *mc_ptr;
-	u8 *addr = *mc_addr_ptr;
-	*vmdq = 0;
-
-	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
-	if (mc_ptr->next)
-		*mc_addr_ptr = mc_ptr->next->dmi_addr;
-	else
-		*mc_addr_ptr = NULL;
-
-	return addr;
-}
-
 /**
  * ixgbevf_set_rx_mode - Multicast set
  * @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u8 *addr_list = NULL;
-	int addr_count = 0;
 
 	/* reprogram multicast list */
-	addr_count = netdev_mc_count(netdev);
-	if (addr_count)
-		addr_list = netdev->mc_list->dmi_addr;
 	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
-						ixgbevf_addr_list_itr);
+		hw->mac.ops.update_mc_addr_list(hw, netdev);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 		rx_buffer_info->page_dma = 0;
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-
-	return;
 }
 
 /**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 
 	if (link_up) {
 		if (!netif_carrier_ok(netdev)) {
-			hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
-			       ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-			       "10 Gbps\n" : "1 Gbps\n"));
+			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+			       10 : 1);
 			netif_carrier_on(netdev);
 			netif_tx_wake_all_queues(netdev);
 		} else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
 		hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset,
 							   size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		       ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
 		       skb->len, hdr_len);
 
-	netdev->trans_start = jiffies;
-
 	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
 	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
 
-	hw_dbg(hw, "LRO is disabled \n");
+	hw_dbg(hw, "LRO is disabled\n");
 
 	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
 	cards_found++;