/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps +
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

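/* A worked example of the sizing arithmetic above (a sketch, assuming a
 * hypothetical max_throughput_mbps of 800): with C integer arithmetic,
 * 1000 / (8 * HTT_RX_AVG_FRM_BYTES) is 1000 / 8000 == 0, so the latency
 * term contributes nothing and size starts out as 800. That lies within
 * [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX], and
 * roundup_pow_of_two(800) yields a ring of 1024 entries.
 */
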
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

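/* Note on the "one empty slot" rule above: the ring has a single
 * producer index shared with the target. If the host were allowed to
 * fill every slot, a completely full ring and a completely empty ring
 * would present the same index relationship, so the classic ring-buffer
 * convention of keeping one slot unused is applied here.
 */
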
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

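/* The paddrs ring and alloc_idx above live in DMA-coherent memory
 * (allocated in ath10k_htt_rx_alloc below), so the final index write is
 * what publishes the newly posted buffers to the target. Example of the
 * wrap-around handled by the mask: with a ring of 1024 entries,
 * size_mask is 0x3ff, so an idx incrementing past 1023 wraps to 0.
 */
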
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_CB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

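/* Pop-side bookkeeping mirrors the fill side: sw_rd_idx chases the
 * alloc index under rx_ring.lock, fill_cnt tracks how many buffers the
 * target still owns, and the buffer is unmapped here because from this
 * point on it is read by the CPU rather than by the device.
 */
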
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu,
				   u32 *attention)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *	  expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		*attention |= __le32_to_cpu(rx_desc->attention.flags) &
			     (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
			      RX_ATTENTION_FLAGS_DECRYPT_ERR |
			      RX_ATTENTION_FLAGS_FCS_ERR |
			      RX_ATTENTION_FLAGS_MGMT_TYPE);

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that they will still be delivered to
			 * the upper stack, if there is no CRC error for this
			 * MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

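/* A sketch of the buffer accounting above: the first buffer of an MSDU
 * carries at most HTT_RX_MSDU_SIZE payload bytes (its head also holds
 * the struct htt_rx_desc), so an msdu_len larger than that spills into
 * chained buffers (frag_info.ring2_more_count of them) of up to
 * HTT_RX_BUF_SIZE each, which carry no rx descriptor. Whenever that
 * happens msdu_chaining is set and the caller must linearize the queue
 * (see ath10k_unchain_msdu) before handing the frame up.
 */
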
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

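/* Concrete lengths for the common ciphers, per the mac80211 constants
 * used above: CCMP strips an 8 byte header (PN/key-id) from the front
 * and an 8 byte MIC from the tail; TKIP strips an 8 byte IV and a
 * 4 byte ICV, plus the 8 byte Michael MIC on the last fragment; WEP
 * strips a 4 byte IV and a 4 byte ICV.
 */
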
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	u8 *qc;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		return !!(qc[0] & IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	}

	return false;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};

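/* Example lookup: the OFDM rate code 0x0B (6 Mbps) maps to index 4,
 * matching the position of 6 Mbps in the rate table registered with
 * mac80211, whose first four entries are the 2.4 GHz CCK rates. On
 * 5 GHz the code below subtracts 4 so indices are relative to a band
 * that registers no CCK entries.
 */
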
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  enum ieee80211_band band,
				  u8 info0, u32 info1, u32 info2,
				  struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using same rate table registering
			   HW - ath10k_rates[]. In case of 5GHz skip
			   CCK rates, so -4 here */
			if (!cck)
				rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = ((info1 >> 10) & 0x07) + 1;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
				      struct ieee80211_rx_status *rx_status,
				      struct sk_buff *skb,
				      enum htt_rx_mpdu_encrypt_type enctype,
				      enum rx_msdu_decap_format fmt,
				      bool dot11frag)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
		return;

	/*
	 * There's no explicit rx descriptor flag to indicate whether a given
	 * frame has been decrypted or not. We're forced to use the decap
	 * format as an implicit indication. However fragmentation rx is always
	 * raw and it probably never reports undecrypted raws.
	 *
	 * This makes sure sniffed frames are reported as-is without stripping
	 * the protected flag.
	 */
	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
		return;

	rx_status->flag |= RX_FLAG_DECRYPTED |
			   RX_FLAG_IV_STRIPPED |
			   RX_FLAG_MMIC_STRIPPED;
	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
					   ~IEEE80211_FCTL_PROTECTED);
}

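/* The Protected bit is cleared above because the hardware has already
 * decrypted the frame and the IV/MIC have been stripped; if the bit
 * were left set, mac80211 would attempt key lookup and decryption on
 * what is now plaintext.
 */
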
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

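/* Example: a 4-address data header is 30 bytes, which the native-wifi
 * decap format pads to round_up(30, 4) == 32 bytes; using the padded
 * length here keeps the skb_pull() in the callers from misaligning the
 * payload on 4-address frames.
 */
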
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct ieee80211_rx_status *rx_status,
				struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct sk_buff *skb;
	struct sk_buff *first;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
	unsigned int hdr_len;

	first = skb_peek(amsdu);

	rxd = (void *)first->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	while ((skb = __skb_dequeue(amsdu))) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(ar,
						enctype), 4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy SA & DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			ether_addr_copy(da, ieee80211_get_DA(hdr));
			ether_addr_copy(sa, ieee80211_get_SA(hdr));
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA and in
			 * case of 4addr it may also have different SA
			 */
			ether_addr_copy(ieee80211_get_DA(hdr), da);
			ether_addr_copy(ieee80211_get_SA(hdr), sa);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt,
					  false);

		if (skb_queue_empty(amsdu))
			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			rx_status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(htt->ar, rx_status, skb);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
			       struct ieee80211_rx_status *rx_status,
			       struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
					enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);

	ath10k_process_rx(htt->ar, rx_status, skb);
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

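/* CHECKSUM_UNNECESSARY tells the network stack it can skip software
 * checksum verification for this skb. Any ambiguity - a non-IP frame, a
 * non-TCP/UDP payload, or a hardware-reported checksum failure - falls
 * back to CHECKSUM_NONE so the stack verifies (and, if need be, drops)
 * the packet itself.
 */
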
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu-head if possible.
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

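/* Sketch of the linearization above: given a hypothetical 3-buffer
 * chain of 1500 + 1500 + 100 bytes, the head skb is grown once via
 * pskb_expand_head() to fit the 1600 trailing bytes, the remaining
 * buffers are copied in and freed, and the result is a single linear
 * skb the decap paths can treat like any other MSDU.
 */
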
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
					struct sk_buff *head,
					bool channel_set,
					u32 attention)
{
	struct ath10k *ar = htt->ar;

	if (head->len == 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to zero-len\n");
		return false;
	}

	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to decrypt-err\n");
		return false;
	}

	if (!channel_set) {
		ath10k_warn(ar, "no channel configured; ignoring frame!\n");
		return false;
	}

	/* Skip mgmt frames while we handle this in WMI */
	if (attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx CAC running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	u32 attention;
	int fw_desc_len;
	u8 *fw_desc;
	bool channel_set;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	/* Fill this once, while this is per-ppdu */
	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
		memset(rx_status, 0, sizeof(*rx_status));
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
				    rx->ppdu.combined_rssi;
	}

	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
		/* TSF available only in 32-bit */
		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
		rx_status->flag |= RX_FLAG_MACTIME_END;
	}

	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

	if (channel_set) {
		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
				      rx->ppdu.info0,
				      __le32_to_cpu(rx->ppdu.info1),
				      __le32_to_cpu(rx->ppdu.info2),
				      rx_status);
	}

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		attention = 0;
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu,
					      &attention);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		if (!ath10k_htt_rx_amsdu_allowed(htt, skb_peek(&amsdu),
						 channel_set, attention)) {
			__skb_queue_purge(&amsdu);
			continue;
		}

		if (ret > 0 && ath10k_unchain_msdu(&amsdu) < 0) {
			__skb_queue_purge(&amsdu);
			continue;
		}

		if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		else
			rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

		if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
			rx_status->flag |= RX_FLAG_MMIC_ERROR;
		else
			rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

		hdr = ath10k_htt_rx_skb_get_hdr(skb_peek(&amsdu));

		if (ath10k_htt_rx_hdr_is_amsdu(hdr))
			ath10k_htt_rx_amsdu(htt, rx_status, &amsdu);
		else
			ath10k_htt_rx_msdu(htt, rx_status,
					   __skb_dequeue(&amsdu));
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *msdu;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head amsdu;
	int ret;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;
	u32 attention = 0;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu, &attention);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	msdu = __skb_dequeue(&amsdu);

	/* FIXME: implement signal strength */
	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	hdr = (struct ieee80211_hdr *)msdu->data;
	rxd = (void *)msdu->data - sizeof(*rxd);
	tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn(ar, "we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu);
		goto end;
	}

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	ath10k_htt_rx_h_protected(htt, rx_status, msdu, enctype, fmt,
				  true);
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);

	if (tkip_mic_err)
		ath10k_warn(ar, "tkip mic error\n");

	if (decrypt_err) {
		ath10k_warn(ar, "decryption err in fragmented rx\n");
		dev_kfree_skb_any(msdu);
		goto end;
	}

	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* It is more efficient to move the header than the payload */
		memmove((void *)msdu->data + paramlen,
			(void *)msdu->data,
			hdrlen);
		skb_pull(msdu, paramlen);
		hdr = (struct ieee80211_hdr *)msdu->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += MICHAEL_MIC_LEN;

	if (trim > msdu->len) {
		ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(msdu);
		goto end;
	}

	skb_trim(msdu, msdu->len - trim);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			msdu->data, msdu->len);
	ath10k_process_rx(htt->ar, rx_status, msdu);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer. RX_IND and TX_COMPL_IND returned
	 * early above: their skbs are queued for the tasklet, which frees
	 * them after processing.
	 */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}