author     Eric W. Biederman <ebiederm@xmission.com>	2014-03-15 17:54:27 -0700
committer  Eric W. Biederman <ebiederm@xmission.com>	2014-03-24 21:19:08 -0700
commit     1616566c4fa590d4b2011458408a8b39cd46bcc1
tree       9ac2e16ab9a651f4c03120232a1f08332dfcc4b6 /drivers/net/ethernet
parent     6956d73aaf28d1449bc1222a0b2e997273cbf520
forcedeth: Call dev_kfree_skb_any instead of kfree_skb.
Replace kfree_skb with dev_kfree_skb_any in functions that can
be called in hard irq and other contexts.
Every location changed is a drop, making dev_kfree_skb_any appropriate.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 811be0bccd14..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (pci_dma_mapping_error(np->pci_dev,
 					  np->put_tx_ctx->dma)) {
 			/* on DMA mapping error - drop the packet */
-			kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
 			np->stat_tx_dropped++;
 			u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
 						tmp_tx_ctx = np->first_tx_ctx;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
-				kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
 				np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		if (pci_dma_mapping_error(np->pci_dev,
 					  np->put_tx_ctx->dma)) {
 			/* on DMA mapping error - drop the packet */
-			kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
 			np->stat_tx_dropped++;
 			u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
 						tmp_tx_ctx = np->first_tx_ctx;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
-				kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
 				np->stat_tx_dropped++;
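
For illustration, a minimal sketch (separate from the patch above) of the pattern being fixed: a driver's start_xmit handler can be invoked with hard IRQs disabled (for example via netpoll/netconsole), so an skb dropped on that path must be freed with dev_kfree_skb_any(), which falls back to dev_kfree_skb_irq() in hard-irq context or with IRQs disabled and to dev_kfree_skb() otherwise. The function name example_start_xmit, the use of the generic DMA API, and the stats handling below are assumptions for illustration only; forcedeth itself uses pci_dma_mapping_error() and its own software stats, as shown in the diff.

/*
 * Hypothetical driver sketch, not taken from forcedeth: shows why a drop
 * on the transmit path uses dev_kfree_skb_any() rather than kfree_skb().
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct device *dma_dev = dev->dev.parent; /* assumed parent (e.g. PCI) device */
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma)) {
		/*
		 * The packet is being dropped, not successfully consumed,
		 * and this code may run in hard-irq context (e.g. netpoll),
		 * so dev_kfree_skb_any() is used: it picks the
		 * context-appropriate free routine instead of assuming
		 * process/softirq context the way kfree_skb() does.
		 */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * Success path elided in this sketch: a real driver would fill a
	 * TX descriptor with 'dma', kick the hardware, and unmap/free the
	 * skb from its TX-completion handler.
	 */
	return NETDEV_TX_OK;
}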