Diffstat (limited to 'drivers/net/ethernet/amd/pcnet32.c')
 drivers/net/ethernet/amd/pcnet32.c | 124 +++++++++++++++++++++---------
 1 file changed, 89 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..e7cc9174e364 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	struct pcnet32_rx_head *new_rx_ring;
 	struct sk_buff **new_skb_list;
 	int new, overlap;
+	unsigned int entries = 1 << size;
 
 	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
 					   sizeof(struct pcnet32_rx_head) *
-					   (1 << size),
+					   entries,
 					   &new_ring_dma_addr);
 	if (new_rx_ring == NULL) {
 		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 		return;
 	}
-	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
 
-	new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
 	if (!new_dma_addr_list)
 		goto free_new_rx_ring;
 
-	new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
+	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
 	if (!new_skb_list)
 		goto free_new_lists;
 
 	/* first copy the current receive buffers */
-	overlap = min(size, lp->rx_ring_size);
+	overlap = min(entries, lp->rx_ring_size);
 	for (new = 0; new < overlap; new++) {
 		new_rx_ring[new] = lp->rx_ring[new];
 		new_dma_addr_list[new] = lp->rx_dma_addr[new];
 		new_skb_list[new] = lp->rx_skbuff[new];
 	}
 	/* now allocate any new buffers needed */
-	for (; new < size; new++) {
+	for (; new < entries; new++) {
 		struct sk_buff *rx_skbuff;
 		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
 		rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 		new_dma_addr_list[new] =
 			pci_map_single(lp->pci_dev, rx_skbuff->data,
 				       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev,
+					  new_dma_addr_list[new])) {
+			netif_err(lp, drv, dev, "%s dma mapping failed\n",
+				  __func__);
+			dev_kfree_skb(new_skb_list[new]);
+			goto free_all_new;
+		}
 		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
 		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	/* and free any unneeded buffers */
 	for (; new < lp->rx_ring_size; new++) {
 		if (lp->rx_skbuff[new]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[new]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(lp->rx_skbuff[new]);
 		}
 	}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 			    lp->rx_ring_size, lp->rx_ring,
 			    lp->rx_ring_dma_addr);
 
-	lp->rx_ring_size = (1 << size);
+	lp->rx_ring_size = entries;
 	lp->rx_mod_mask = lp->rx_ring_size - 1;
 	lp->rx_len_bits = (size << 4);
 	lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 free_all_new:
 	while (--new >= lp->rx_ring_size) {
 		if (new_skb_list[new]) {
-			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   new_dma_addr_list[new]))
+				pci_unmap_single(lp->pci_dev,
+						 new_dma_addr_list[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(new_skb_list[new]);
 		}
 	}
@@ -634,8 +649,7 @@ free_new_lists:
 	kfree(new_dma_addr_list);
 free_new_rx_ring:
 	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    (1 << size),
+			    sizeof(struct pcnet32_rx_head) * entries,
 			    new_rx_ring,
 			    new_ring_dma_addr);
 }
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
 		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[i],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(lp->rx_skbuff[i]);
 		}
 		lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		lp->tx_dma_addr[x] =
 		    pci_map_single(lp->pci_dev, skb->data, skb->len,
 				   PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+			netif_printk(lp, hw, KERN_DEBUG, dev,
+				     "DMA mapping error at line: %d!\n",
+				     __LINE__);
+			goto clean_up;
+		}
 		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
 		wmb();	/* Make sure owner changes after all others are visible */
 		lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
 	if (pkt_len > rx_copybreak) {
 		struct sk_buff *newskb;
+		dma_addr_t new_dma_addr;
 
 		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+		/*
+		 * map the new buffer, if mapping fails, drop the packet and
+		 * reuse the old buffer
+		 */
 		if (newskb) {
 			skb_reserve(newskb, NET_IP_ALIGN);
-			skb = lp->rx_skbuff[entry];
-			pci_unmap_single(lp->pci_dev,
-					 lp->rx_dma_addr[entry],
-					 PKT_BUF_SIZE,
-					 PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[entry] = newskb;
-			lp->rx_dma_addr[entry] =
-			    pci_map_single(lp->pci_dev,
-					   newskb->data,
-					   PKT_BUF_SIZE,
-					   PCI_DMA_FROMDEVICE);
-			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
-			rx_in_place = 1;
+			new_dma_addr = pci_map_single(lp->pci_dev,
+						      newskb->data,
+						      PKT_BUF_SIZE,
+						      PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+				netif_err(lp, rx_err, dev,
+					  "DMA mapping error.\n");
+				dev_kfree_skb(newskb);
+				skb = NULL;
+			} else {
+				skb = lp->rx_skbuff[entry];
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[entry],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, pkt_len);
+				lp->rx_skbuff[entry] = newskb;
+				lp->rx_dma_addr[entry] = new_dma_addr;
+				rxp->base = cpu_to_le32(new_dma_addr);
+				rx_in_place = 1;
+			}
 		} else
 			skb = NULL;
 	} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
-					 lp->tx_skbuff[i]->len,
-					 PCI_DMA_TODEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->tx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->tx_dma_addr[i],
+						 lp->tx_skbuff[i]->len,
+						 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(lp->tx_skbuff[i]);
 		}
 		lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
 		}
 
 		rmb();
-		if (lp->rx_dma_addr[i] == 0)
+		if (lp->rx_dma_addr[i] == 0) {
 			lp->rx_dma_addr[i] =
 			    pci_map_single(lp->pci_dev, rx_skbuff->data,
 					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev,
+						  lp->rx_dma_addr[i])) {
+				/* there is not much we can do at this point */
+				netif_err(lp, drv, dev,
+					  "%s pci dma mapping error\n",
+					  __func__);
+				return -1;
+			}
+		}
 		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
 		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		wmb();	/* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 
 	lp->tx_ring[entry].misc = 0x00000000;
 
-	lp->tx_skbuff[entry] = skb;
 	lp->tx_dma_addr[entry] =
 	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		goto drop_packet;
+	}
+	lp->tx_skbuff[entry] = skb;
 	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
 	wmb();	/* Make sure owner changes after all others are visible */
 	lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 		lp->tx_full = 1;
 		netif_stop_queue(dev);
 	}
+drop_packet:
 	spin_unlock_irqrestore(&lp->lock, flags);
 	return NETDEV_TX_OK;
 }
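
Every hunk above applies the same two-sided pattern: after pci_map_single() the returned address is validated with pci_dma_mapping_error() before it is written into a descriptor or committed to driver state, and on teardown an address is only passed to pci_unmap_single() if it actually mapped. Below is a minimal sketch of that pattern, assuming the same PCI DMA compat API the driver uses; EXAMPLE_BUF_SIZE and the example_* helpers are illustrative stand-ins, not code from this patch.

#include <linux/pci.h>
#include <linux/skbuff.h>

#define EXAMPLE_BUF_SIZE 1544	/* stand-in for the driver's PKT_BUF_SIZE */

/* Map an rx buffer and validate the mapping before the address is used. */
static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, EXAMPLE_BUF_SIZE,
			      PCI_DMA_FROMDEVICE);
	/* Mapping can fail (e.g. IOMMU/swiotlb exhaustion); a failed
	 * address must never reach a descriptor or pci_unmap_single(). */
	if (pci_dma_mapping_error(pdev, *dma))
		return -ENOMEM;
	return 0;
}

/* Teardown side of the pattern: unmap only addresses that mapped. */
static void example_unmap_rx_skb(struct pci_dev *pdev, dma_addr_t dma)
{
	if (!pci_dma_mapping_error(pdev, dma))
		pci_unmap_single(pdev, dma, EXAMPLE_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
}

The pcnet32_start_xmit() hunk also shows why the check must come before state is committed: the patch moves the lp->tx_skbuff[entry] = skb assignment below the error check, so a failed mapping never leaves a stale skb pointer in the ring for pcnet32_purge_tx_ring() to unmap and free later.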