/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>
/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
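/* Worked example (illustrative numbers only): with max_throughput_mbps =
 * 800 the formula above yields 800 * 1000 / (8 * 1000) * 20 = 2000
 * buffers, which the clamps leave untouched and roundup_pow_of_two()
 * turns into a 2048-entry ring.
 */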
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps *
	       1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
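/* For the same illustrative 800 Mbps case the fill level works out to
 * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers. The "size - 1" cap is the
 * classic ring-buffer trick: if the ring were ever allowed to fill
 * completely, a full ring would look exactly like an empty one to the
 * index arithmetic.
 */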
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;
		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/* publish the new fill index in the word shared with the target */
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the
	 * FW will not report RX until the ring is refilled with enough
	 * buffers. This automatically balances load with respect to CPU
	 * power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
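/* Net effect of the scheme above (illustrative): with a deficit of several
 * hundred buffers, at most ATH10K_HTT_MAX_NUM_REFILL of them are posted
 * per pass and the tasklet re-schedules itself for the remainder, letting
 * any RX-processing tasklets queued in the meantime run in between.
 */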
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	return msdu;
}
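/* Note on the index arithmetic above: since the ring size is forced to a
 * power of two, "idx & size_mask" is equivalent to "idx % size", so the
 * software read index wraps for free (e.g. a 2048-entry ring has mask
 * 0x7ff).
 */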
static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu,
				   u32 *attention)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return -1;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		*attention |= __le32_to_cpu(rx_desc->attention.flags) &
			      (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
			       RX_ATTENTION_FLAGS_DECRYPT_ERR |
			       RX_ATTENTION_FLAGS_FCS_ERR |
			       RX_ATTENTION_FLAGS_MGMT_TYPE);
		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized AMSDU happened, FW will lost
			 * some of MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
					"htt rx chained: ", next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	if (*head_msdu == NULL)
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
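/* Expected calling pattern, sketched from the handlers further below:
 *
 *	spin_lock_bh(&htt->rx_ring.lock);
 *	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
 *				      &msdu_head, &msdu_tail, &attention);
 *	...
 *	spin_unlock_bh(&htt->rx_ring.lock);
 *	tasklet_schedule(&htt->rx_replenish_task);
 *
 * i.e. the ring is drained for the whole indication first and only then
 * replenished in one go, per the comment above.
 */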
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
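/* Reading the two tables together: the "param" length is what the cipher
 * prepends per MPDU (4-byte WEP IV, 8-byte TKIP/CCMP IV + extended IV)
 * and the "tail" length is what it appends (4-byte TKIP ICV, 8-byte CCMP
 * MIC), which is why AES_CCM_WPA2 shows up as 8 in both helpers.
 */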
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		if (qc[0] & 0x80)
			return true;
	}

	return false;
}
struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  enum ieee80211_band band,
				  u8 info0, u32 info1, u32 info2,
				  struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using same rate table registering
			   HW - ath10k_rates[]. In case of 5GHz skip
			   CCK rates, so -4 here */
			if (!cck)
				rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = ((info1 >> 10) & 0x07) + 1;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}
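/* Example of the legacy mapping above (values from the table comment):
 * OFDM 6 Mbps arrives as rate 0x0B and maps to rate_idx 4 in the shared
 * ath10k_rates[] table; on 5 GHz the four leading CCK entries are absent
 * from the registered table, hence the "rate_idx -= 4" adjustment.
 */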
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
				      struct ieee80211_rx_status *rx_status,
				      struct sk_buff *skb,
				      enum htt_rx_mpdu_encrypt_type enctype,
				      enum rx_msdu_decap_format fmt,
				      bool dot11frag)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
		return;

	/*
	 * There's no explicit rx descriptor flag to indicate whether a given
	 * frame has been decrypted or not. We're forced to use the decap
	 * format as an implicit indication. However fragmentation rx is always
	 * raw and it probably never reports undecrypted raws.
	 *
	 * This makes sure sniffed frames are reported as-is without stripping
	 * the protected flag.
	 */
	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
		return;

	rx_status->flag |= RX_FLAG_DECRYPTED |
			   RX_FLAG_IV_STRIPPED |
			   RX_FLAG_MMIC_STRIPPED;
	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
					   ~IEEE80211_FCTL_PROTECTED);
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ATH10K_DBG_DATA,
		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i\n",
		   skb,
		   skb->len,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR));
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
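/* e.g. a plain QoS data header is 26 bytes on the air, so
 * round_up(26, 4) = 28 accounts for the two padding bytes the target
 * inserts after a decapped native-wifi header.
 */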
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct ieee80211_rx_status *rx_status,
				struct sk_buff *skb_in)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *skb = skb_in;
	struct sk_buff *first;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */
			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a singly
			 * chained A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		skb_in = skb;
		ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
					  false);
		skb = skb->next;
		skb_in->next = NULL;

		if (skb)
			rx_status->flag |= RX_FLAG_AMSDU_MORE;
		else
			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(htt->ar, rx_status, skb_in);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
			       struct ieee80211_rx_status *rx_status,
			       struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("htt rx received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);

	ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
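/* In short, CHECKSUM_UNNECESSARY is only claimed for IPv4/IPv6 TCP or UDP
 * frames whose L3 and L4 checksums the hardware verified; anything else
 * (e.g. ICMP, or any failed check) is handed up as CHECKSUM_NONE so the
 * stack re-verifies it in software.
 */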
static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
	struct sk_buff *next = msdu_head->next;
	struct sk_buff *to_free = next;
	int space;
	int total_len = 0;

	/* TODO: Might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skbs?
	 */

	msdu_head->next = NULL;

	/* Allocate total length all at once. */
	while (next) {
		total_len += next->len;
		next = next->next;
	}

	space = total_len - skb_tailroom(msdu_head);
	if ((space > 0) &&
	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		msdu_head->next = to_free;
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	next = to_free;
	while (next) {
		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
					  next->len);
		next = next->next;
	}

	/* If here, we have consolidated skb. Free the
	 * fragments and pass the main skb on up the
	 * stack.
	 */
	ath10k_htt_rx_free_msdu_chain(to_free);
	return 0;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
					struct sk_buff *head,
					enum htt_rx_mpdu_status status,
					bool channel_set,
					u32 attention)
{
	if (head->len == 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx dropping due to zero-len\n");
		return false;
	}

	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx dropping due to decrypt-err\n");
		return false;
	}

	if (!channel_set) {
		ath10k_warn("no channel configured; ignoring frame!\n");
		return false;
	}

	/* Skip mgmt frames while we handle this in WMI */
	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
	    attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
		ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (status != HTT_RX_IND_MPDU_STATUS_OK &&
	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
	    !htt->ar->monitor_started) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx ignoring frame w/ status %d\n",
			   status);
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "htt rx CAC running\n");
		return false;
	}

	return true;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct htt_rx_desc *rxd;
	enum htt_rx_mpdu_status status;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	u32 attention;
	int fw_desc_len;
	u8 *fw_desc;
	bool channel_set;
	int i, j;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	/* Fill this once, while this is per-ppdu */
	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
		memset(rx_status, 0, sizeof(*rx_status));
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
				    rx->ppdu.combined_rssi;
	}

	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
		/* TSF available only in 32-bit */
		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
		rx_status->flag |= RX_FLAG_MACTIME_END;
	}

	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

	if (channel_set) {
		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
				      rx->ppdu.info0,
				      __le32_to_cpu(rx->ppdu.info1),
				      __le32_to_cpu(rx->ppdu.info2),
				      rx_status);
	}

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;

			attention = 0;
			msdu_head = NULL;
			msdu_tail = NULL;
			ret = ath10k_htt_rx_amsdu_pop(htt,
						      &fw_desc,
						      &fw_desc_len,
						      &msdu_head,
						      &msdu_tail,
						      &attention);

			if (ret < 0) {
				ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
					    ret);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			rxd = container_of((void *)msdu_head->data,
					   struct htt_rx_desc,
					   msdu_payload);

			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
							 status,
							 channel_set,
							 attention)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ret > 0 &&
			    ath10k_unchain_msdu(msdu_head) < 0) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
			else
				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
			else
				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct ieee80211_hdr *hdr;
	int ret;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;
	u32 attention = 0;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &msdu_head, &msdu_tail,
				      &attention);
	spin_unlock_bh(&htt->rx_ring.lock);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn("failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */
	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
				  true);
	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

	if (tkip_mic_err)
		ath10k_warn("tkip mic error\n");

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(enctype);

		/* It is more efficient to move the header than the payload */
		memmove((void *)msdu_head->data + paramlen,
			(void *)msdu_head->data,
			hdrlen);
		skb_pull(msdu_head, paramlen);
		hdr = (struct ieee80211_hdr *)msdu_head->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(enctype);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > msdu_head->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	skb_trim(msdu_head, msdu_head->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			msdu_head->data, msdu_head->len);
	ath10k_process_rx(htt->ar, rx_status, msdu_head);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn("unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn("received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn("received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn("received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn("received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn("received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}