author     Johannes Berg <johannes.berg@intel.com>    2012-11-04 09:29:17 +0100
committer  Johannes Berg <johannes.berg@intel.com>    2012-11-05 16:08:58 +0100
commit     7c34158231b2eda8dcbd297be2bb1559e69cb433 (patch)
tree       550e6e356d67681dea769e035f2801fe2dc2cb76 /drivers/net
parent     8f7b8db6e0557c8437adf9371e020cd89a7e85dc (diff)
iwlwifi: handle DMA mapping failures
The RX replenish code doesn't handle DMA mapping failures, so a failed
mapping would leave the driver holding an invalid DMA address for a
receive buffer. This was reported by Shuah Khan, who hit a DMA mapping
framework warning ("device driver failed to check map error"). A minimal
sketch of the required check follows the tags below.
Cc: stable@vger.kernel.org
Reported-by: Shuah Khan <shuah.khan@hp.com>
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
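
For context, the core of the fix is the general DMA-API rule that the address
returned by dma_map_page() must be checked with dma_mapping_error() before it
is stored or handed to hardware; on failure the backing page has to be freed
or re-queued instead. The sketch below shows that pattern in isolation,
assuming a caller that supplies the device and page order;
example_map_rx_page() and its parameters are illustrative stand-ins, not part
of the iwlwifi driver.

/*
 * Minimal sketch (not iwlwifi code): allocate a receive page, DMA-map it,
 * and check the mapping before the address is used anywhere.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_map_rx_page(struct device *dev, unsigned int order,
			       struct page **pagep, dma_addr_t *dmap)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_pages(GFP_KERNEL, order);
	if (!page)
		return -ENOMEM;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		/* mapping failed: free the page and report the failure */
		__free_pages(page, order);
		return -ENOMEM;
	}

	*pagep = page;
	*dmap = dma;
	return 0;
}

The patch applies this check in both RX paths: in iwl_rx_allocate() it frees
the page and puts the buffer back on rx_used, and in iwl_rx_handle_rxbuf() it
does the same rather than placing an unmapped buffer on rx_free.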
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 17c8e5d82681..bb69f8f90b3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 			dma_map_page(trans->dev, page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			rxb->page = NULL;
+			spin_lock_irqsave(&rxq->lock, flags);
+			list_add(&rxb->list, &rxq->rx_used);
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			dma_map_page(trans->dev, rxb->page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			/*
+			 * free the page(s) as well to not break
+			 * the invariant that the items on the used
+			 * list have no page(s)
+			 */
+			__free_pages(rxb->page, trans_pcie->rx_page_order);
+			rxb->page = NULL;
+			list_add_tail(&rxb->list, &rxq->rx_used);
+		} else {
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
 	spin_unlock_irqrestore(&rxq->lock, flags);