Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bnad.c')
 drivers/net/ethernet/brocade/bna/bnad.c | 65 ++++++++++------------
 1 file changed, 27 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187e82fb..675550fe8ee9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- BNAD_UPDATE_CTR(bnad, tso_err);
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
}
/*
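
The hunk above replaces the open-coded unclone-before-write pattern with
skb_cow_head(), which performs the same check and reallocation in one call.
A minimal sketch of the two equivalent forms (error handling elided):

    /* Old form: manually unclone a shared header before writing to it. */
    if (skb_header_cloned(skb)) {
            err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
            if (err)
                    return err;
    }

    /* New form: skb_cow_head(skb, headroom) does the clone check and
     * head reallocation internally; 0 requests no extra headroom.
     */
    err = skb_cow_head(skb, 0);
    if (err < 0)
            return err;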
@@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)
for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
- if (ret > 0) {
- /* Not enough MSI-X vectors. */
+ ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+ 1, bnad->msix_num);
+ if (ret < 0) {
+ goto intx_mode;
+ } else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
@@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
- if (bnad->msix_num > ret)
+ if (bnad->msix_num > ret) {
+ pci_disable_msix(bnad->pcidev);
goto intx_mode;
-
- /* Try once more with adjusted numbers */
- /* If this fails, fall back to INTx */
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- bnad->msix_num);
- if (ret)
- goto intx_mode;
-
- } else if (ret < 0)
- goto intx_mode;
+ }
+ }
pci_intx(bnad->pcidev, 0);
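
Unlike the old pci_enable_msix(), which could return a positive "retry with
fewer vectors" hint without enabling anything, pci_enable_msix_range(dev,
entries, minvec, maxvec) either fails with a negative errno or actually
enables between minvec and maxvec vectors and returns the count. That is why
the rewritten fallback path must call pci_disable_msix() before dropping to
INTx: a short positive return means MSI-X is already on. A sketch of the
pattern, with pdev, entries and want as hypothetical placeholders:

    /* Ask for up to `want` vectors but accept as few as one. */
    ret = pci_enable_msix_range(pdev, entries, 1, want);
    if (ret < 0) {
            /* Nothing was enabled; fall back to INTx directly. */
    } else if (ret < want) {
            /* MSI-X is enabled at the reduced vector count. If the
             * driver cannot run with it, MSI-X must be disabled
             * before switching to INTx:
             */
            pci_disable_msix(pdev);
    }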
@@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
}
if (unlikely((gso_size + skb_transport_offset(skb) +
tcp_hdrlen(skb)) >= skb->len)) {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
} else {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
txqent->hdr.wi.lso_mss = htons(gso_size);
}
@@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
} else {
- txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 proto = 0;
- if (skb->protocol == __constant_htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol ==
- __constant_htons(ETH_P_IPV6)) {
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
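
The __constant_htons() conversions above are purely cosmetic: htons()
already evaluates to a compile-time constant when its argument is constant
(the kernel's byte-swap helpers select the constant variant via
__builtin_constant_p), and checkpatch flags direct __constant_htons() use.
The two spellings generate identical code:

    opcode = __constant_htons(BNA_TXQ_WI_SEND); /* old, discouraged */
    opcode = htons(BNA_TXQ_WI_SEND);            /* preferred, same code */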
@@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Sanity checks for the skb */
if (unlikely(skb->len <= ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
return NETDEV_TX_OK;
}
if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
if (unlikely(len == 0)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
@@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
@@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
@@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Program the opcode, flags, frame_len, num_vectors in WI */
if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
}
@@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
vect_id = 0;
BNA_QE_INDX_INC(prod, q_depth);
txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
- txqent->hdr.wi_ext.opcode =
- __constant_htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
unmap = &unmap_q[prod];
}
@@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(len != skb->len)) {
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}
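
The remaining hunks convert every error-path free in the transmit routine
from dev_kfree_skb() to dev_kfree_skb_any(). ndo_start_xmit can be reached
from hard-irq context (e.g. via netpoll), where plain dev_kfree_skb() is not
safe; dev_kfree_skb_any() checks the context and defers the free when
necessary. Its core logic is roughly the following (mirroring the inline
helper in skbuff.h of this era):

    if (in_irq() || irqs_disabled())
            dev_kfree_skb_irq(skb);   /* queue for later free in softirq */
    else
            dev_kfree_skb(skb);       /* safe to free immediately */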