path: root/drivers/net/wireless/ath/ath10k/htt_rx.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-10-08 21:40:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-08 21:40:54 -0400
commit    35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree      15b4b33206818886d9cff371fd2163e073b70568 /drivers/net/wireless/ath/ath10k/htt_rx.c
parent    d5935b07da53f74726e2a65dd4281d0f2c70e5d4 (diff)
parent    64b1f00a0830e1c53874067273a096b228d83d36 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of
     contributors, is the addition of multi-send for transmit. This is the
     result of discussions back in Chicago, and the hard work of several
     individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees skb->xmit_more
     as true, it can choose to defer the doorbell telling the driver to
     start processing the new TX queue entries.

     skb->xmit_more means that the generic networking is guaranteed to call
     the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple packets at
     a time, and the handling of mis-predicted offloads in software is now
     done with no locks held.

     Finally, pktgen is extended to have a "burst" parameter that can be
     used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
     virtio_net. Adding support is almost trivial, so expect more drivers
     to support this optimization soon. (A minimal sketch of the
     doorbell-deferral pattern follows the shortlog below.)

     I want to thank, in no particular or implied order, Jesper Dangaard
     Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert, Jamal Hadi Salim,
     John Fastabend, Florian Westphal, Daniel Borkmann, David Tat, Hannes
     Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via ethtool,
     and add rx_copybreak support to the enic driver. From Govindarajulu
     Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx
     driver in particular (EEE support, auto power down, etc.) from Florian
     Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing drivers to
     determine the optimal "linear pull" size for devices that DMA into
     pools of pages. The objective is to get exactly the necessary amount
     of headers into the linear SKB area pre-pulled, but no more. The new
     interface drivers use is eth_get_headlen(). From WANG Cong, with
     driver conversions (several had their own by-hand duplicated
     implementations) by Alexander Duyck and Eric Dumazet.

  6) Support checksumming more smoothly and efficiently for encapsulations,
     and add "foo over UDP" facility. From Tom Herbert.

  7) Add Broadcom SF2 switch driver to DSA layer, from Florian Fainelli.

  8) eBPF now can load programs via a system call and has an extensive
     testsuite. Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several major
     areas such as the classifiers and rate estimators. From John
     Fastabend.

 10) Add driver for Intel FM10000 Ethernet Switch, from Alexander Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from Florian
     Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly
     faster. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169:add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
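
Below is a minimal sketch (not taken from the pull request itself) of the xmit_more doorbell-deferral pattern described in item 1, as it looked for drivers of this era, where the hint lived in skb->xmit_more (later kernels replaced the field with netdev_xmit_more()). The my_priv/my_tx_ring structures and the tail_doorbell register are hypothetical placeholders; only the skb->xmit_more check and the netif_xmit_stopped() guard reflect the real interface.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/io.h>

    /* Hypothetical per-queue TX ring state; real drivers (i40e, igb, ixgbe,
     * mlx4, virtio_net) each carry their own equivalent.
     */
    struct my_tx_ring {
            u16 next_to_use;              /* next descriptor index to hand to HW */
            void __iomem *tail_doorbell;  /* MMIO tail/doorbell register */
    };

    struct my_priv {
            struct my_tx_ring tx_ring[8]; /* one ring per TX queue */
    };

    static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
    {
            struct my_priv *priv = netdev_priv(dev);
            u16 qidx = skb_get_queue_mapping(skb);
            struct my_tx_ring *ring = &priv->tx_ring[qidx];
            struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

            /* ... DMA-map the skb and post its descriptors to the hardware
             * ring, advancing ring->next_to_use (omitted in this sketch) ...
             */

            /* Defer the (expensive) doorbell/tail write while the stack
             * promises to call us again immediately with another skb, but
             * never defer past a stopped queue or the already-queued
             * packets would be stranded.
             */
            if (!skb->xmit_more || netif_xmit_stopped(txq))
                    writel(ring->next_to_use, ring->tail_doorbell);

            return NETDEV_TX_OK;
    }

pktgen's new "burst" parameter exercises exactly this path by submitting several packets with xmit_more set before the final one, so a driver's deferred-doorbell handling can be tested in isolation.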
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_rx.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c  217
1 file changed, 137 insertions(+), 80 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 80cdac15588a..60d40a04508b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -42,7 +42,6 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
-
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
@@ -133,7 +132,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
dma_addr_t paddr;
int ret = 0, idx;
- idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
if (!skb) {
@@ -171,7 +170,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
}
fail:
- *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
+ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
return ret;
}
@@ -223,6 +222,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+
ath10k_htt_rx_msdu_buff_replenish(htt);
}
@@ -271,13 +271,14 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
int idx;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_ring.fill_cnt == 0) {
- ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
return NULL;
}
@@ -311,14 +312,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff **tail_msdu,
u32 *attention)
{
+ struct ath10k *ar = htt->ar;
int msdu_len, msdu_chaining = 0;
- struct sk_buff *msdu;
+ struct sk_buff *msdu, *next;
struct htt_rx_desc *rx_desc;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
- ath10k_warn("htt is confused. refusing rx\n");
+ ath10k_warn(ar, "htt is confused. refusing rx\n");
return -1;
}
@@ -331,7 +333,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -354,7 +356,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
ath10k_htt_rx_free_msdu_chain(*head_msdu);
*head_msdu = NULL;
msdu = NULL;
- ath10k_err("htt rx stopped. cannot recover\n");
+ ath10k_err(ar, "htt rx stopped. cannot recover\n");
htt->rx_confused = true;
break;
}
@@ -429,7 +431,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"htt rx chained: ", next->data,
next->len + skb_tailroom(next));
@@ -448,11 +450,11 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (last_msdu) {
msdu->next = NULL;
break;
- } else {
- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
- msdu->next = next;
- msdu = next;
}
+
+ next = ath10k_htt_rx_netbuf_pop(htt);
+ msdu->next = next;
+ msdu = next;
}
*tail_msdu = msdu;
@@ -478,18 +480,21 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+
ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
dma_addr_t paddr;
void *vaddr;
+ size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
if (!is_power_of_2(htt->rx_ring.size)) {
- ath10k_warn("htt rx ring size is not power of 2\n");
+ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
@@ -512,9 +517,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
- vaddr = dma_alloc_coherent(htt->ar->dev,
- (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
- &paddr, GFP_DMA);
+ size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+
+ vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
if (!vaddr)
goto err_dma_ring;
@@ -550,7 +555,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
- ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -572,7 +577,8 @@ err_netbuf:
return -ENOMEM;
}
-static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_WEP40:
@@ -588,11 +594,12 @@ static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
return 0;
}
- ath10k_warn("unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
-static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
@@ -608,7 +615,7 @@ static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
return 8;
}
- ath10k_warn("unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
@@ -620,19 +627,21 @@ static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
if (fmt == RX_MSDU_DECAP_RAW)
return (void *)skb->data;
- else
- return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+
+ return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}
/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
+ u8 *qc;
+
if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
+ qc = ieee80211_get_qos_ctl(hdr);
if (qc[0] & 0x80)
return true;
}
@@ -819,19 +828,55 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
return true;
}
+static const char * const tid_to_ac[] = {
+ "BE",
+ "BK",
+ "BK",
+ "BE",
+ "VI",
+ "VI",
+ "VO",
+ "VO",
+};
+
+static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ if (tid < 8)
+ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
+ else
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
static void ath10k_process_rx(struct ath10k *ar,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ char tid[32];
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
- ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
+ ath10k_dbg(ar, ATH10K_DBG_DATA,
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
+ ieee80211_get_SA(hdr),
+ ath10k_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
@@ -843,8 +888,9 @@ static void ath10k_process_rx(struct ath10k *ar,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
- !!(status->flag & RX_FLAG_MMIC_ERROR));
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
@@ -860,18 +906,19 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb_in)
{
+ struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct sk_buff *skb = skb_in;
struct sk_buff *first;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
- u8 hdr_buf[64], addr[ETH_ALEN], *qos;
+ u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
unsigned int hdr_len;
rxd = (void *)skb->data - sizeof(*rxd);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -893,8 +940,8 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
/* First frame in an A-MSDU chain has more decapped data. */
if (skb == first) {
len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
- len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
- 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(ar,
+ enctype), 4);
decap_hdr += len;
}
@@ -904,10 +951,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* pull decapped header and copy DA */
+ /* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
- memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(skb, hdr_len);
/* push original 802.11 header */
@@ -921,8 +969,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- /* original 802.11 header has a different DA */
- memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* strip ethernet header and insert decapped 802.11
@@ -965,6 +1016,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
+ struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@@ -974,16 +1026,16 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
- ath10k_warn("htt rx received chained non A-MSDU frame\n");
+ ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
}
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1011,7 +1063,8 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
rfc1042 = hdr;
rfc1042 += roundup(hdr_len, 4);
- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
+ enctype), 4);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
@@ -1120,27 +1173,29 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
bool channel_set,
u32 attention)
{
+ struct ath10k *ar = htt->ar;
+
if (head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
return false;
}
if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
return false;
}
if (!channel_set) {
- ath10k_warn("no channel configured; ignoring frame!\n");
+ ath10k_warn(ar, "no channel configured; ignoring frame!\n");
return false;
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
@@ -1148,14 +1203,14 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx CAC running\n");
return false;
}
@@ -1166,6 +1221,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
+ struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
@@ -1211,7 +1267,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
rx_status);
}
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
@@ -1233,7 +1289,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
&attention);
if (ret < 0) {
- ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+ ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
@@ -1280,8 +1336,9 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
- struct htt_rx_fragment_indication *frag)
+ struct htt_rx_fragment_indication *frag)
{
+ struct ath10k *ar = htt->ar;
struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
@@ -1308,10 +1365,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
&attention);
spin_unlock_bh(&htt->rx_ring.lock);
- ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+ ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
- ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
@@ -1325,10 +1382,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
if (fmt != RX_MSDU_DECAP_RAW) {
- ath10k_warn("we dont support non-raw fragmented rx yet\n");
+ ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
@@ -1340,17 +1397,17 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
if (tkip_mic_err)
- ath10k_warn("tkip mic error\n");
+ ath10k_warn(ar, "tkip mic error\n");
if (decrypt_err) {
- ath10k_warn("decryption err in fragmented rx\n");
+ ath10k_warn(ar, "decryption err in fragmented rx\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(enctype);
+ paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
/* It is more efficient to move the header than the payload */
memmove((void *)msdu_head->data + paramlen,
@@ -1364,7 +1421,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
trim = 4;
/* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(enctype);
+ trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
@@ -1372,20 +1429,20 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
trim += 8;
if (trim > msdu_head->len) {
- ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+ ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
skb_trim(msdu_head, msdu_head->len - trim);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
msdu_head->data, msdu_head->len);
ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"expecting more fragmented rx in one indication %d\n",
fw_desc_len);
}
@@ -1415,12 +1472,12 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.discard = true;
break;
default:
- ath10k_warn("unhandled tx completion status %d\n", status);
+ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
tx_done.discard = true;
break;
}
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
@@ -1441,14 +1498,14 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx addba tid %hu peer_id %hu size %hhu\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn("received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@@ -1456,13 +1513,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
- ath10k_warn("received addba event for invalid vdev_id: %u\n",
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx start rx ba session sta %pM tid %hu size %hhu\n",
peer->addr, tid, ev->window_size);
@@ -1481,14 +1538,14 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx delba tid %hu peer_id %hu\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn("received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@@ -1496,13 +1553,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
- ath10k_warn("received addba event for invalid vdev_id: %u\n",
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx stop rx ba session sta %pM tid %hu\n",
peer->addr, tid);
@@ -1517,9 +1574,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
- ath10k_warn("unaligned htt message, expect trouble\n");
+ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@@ -1583,7 +1640,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"sec ind peer_id %d unicast %d type %d\n",
__le16_to_cpu(ev->peer_id),
!!(ev->flags & HTT_SECURITY_IS_UNICAST),
@@ -1592,7 +1649,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
break;
@@ -1601,7 +1658,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
- trace_ath10k_htt_stats(skb->data, skb->len);
+ trace_ath10k_htt_stats(ar, skb->data, skb->len);
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
/* Firmware can return tx frames if it's unable to fully
@@ -1609,7 +1666,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
* sends all tx frames as already inspected so this shouldn't
* happen unless fw has a bug.
*/
- ath10k_warn("received an unexpected htt tx inspect event\n");
+ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
break;
case HTT_T2H_MSG_TYPE_RX_ADDBA:
ath10k_htt_rx_addba(ar, resp);
@@ -1624,9 +1681,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
default:
- ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
resp->hdr.msg_type);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
};