/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

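/* Example numbers behind the bounds above: at 1 Gbps of 1500B MSDUs the
 * hardware delivers roughly 1e9 / (8 * 1500) ~= 83k frames per second,
 * i.e. about 1667 frames in 20 ms; the next power of two is 2048, hence
 * HTT_RX_RING_SIZE_MAX.
 */
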
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to
         * service the rx indication from one A-MPDU before the rx
         * indication from the subsequent A-MPDU happens, roughly 1-2 ms
         * later. However, the rx ring should be sized very conservatively,
         * to accommodate the worst reasonable delay before the host CPU
         * services a rx indication interrupt.
         *
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         *
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use. Thus, the ring itself should be sized very
         * conservatively, while the degree to which the ring is filled
         * with empty buffers should be sized moderately conservatively.
         */

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;

        if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = roundup_pow_of_two(size);

        return size;
}

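/* Example: with max_throughput_mbps = 1000 the formula above yields
 * 1000 * 1000 / (8 * 1000) * 20 = 2500 entries, which is clamped to
 * HTT_RX_RING_SIZE_MAX (2048); 2048 is already a power of two, so
 * roundup_pow_of_two() leaves it unchanged.
 */
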
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
        int size;

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
        if (size >= htt->rx_ring.size)
                size = htt->rx_ring.size - 1;

        return size;
}

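/* Example: with max_throughput_mbps = 1000 the fill level works out to
 * 1000 * 1000 / (8 * 1000) * 10 = 1250 buffers - i.e. roughly half of a
 * 2048-entry ring is kept posted under the "worst likely" 10 ms host
 * latency assumption.
 */
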
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_cb *cb;
        int i;

        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                cb = ATH10K_SKB_CB(skb);
                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                ATH10K_SKB_CB(skb)->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

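/* Note: the alloc index is written back unconditionally above - even on a
 * partial (-ENOMEM) fill - so the DMA-visible shared index always reflects
 * how many buffers were actually posted to the ring.
 */
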
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up significant amount of CPU cycles and starve
         * other tasks, e.g. TX on an ethernet device while acting as a bridge
         * with ath10k wlan interface. This ended up with very poor performance
         * once the host system's CPU was overwhelmed with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets are
         * processed in FIFO order. This means actual RX processing can starve
         * out refilling. If there are not enough buffers on the RX ring the FW
         * will not report RX until it is refilled with enough buffers. This
         * automatically balances load with respect to available CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability. */
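        /* For instance, with a fill level in the hundreds of buffers a fully
         * drained ring is topped up over several tasklet runs of at most
         * ATH10K_HTT_MAX_NUM_REFILL buffers each, interleaved with rx
         * processing, instead of one long burst.
         */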
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                tasklet_schedule(&htt->rx_replenish_task);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < htt->rx_ring.size; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                if (!skb)
                        continue;

                dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                htt->rx_ring.netbufs_ring[i] = NULL;
        }
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->rx_replenish_task);
        tasklet_kill(&htt->txrx_compl_task);

        skb_queue_purge(&htt->tx_compl_q);
        skb_queue_purge(&htt->rx_compl_q);

        ath10k_htt_rx_ring_clean_up(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_CB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 *        expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will lose
                         * some of the MSDU status - in this case, the FW
                         * descriptors provided will be less than the
                         * actual MSDUs inside this MPDU. Mark the FW
                         * descriptors so that the frames are still delivered
                         * to the upper stack if there is no CRC error for
                         * this MPDU.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs in the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.size_mask = htt->rx_ring.size - 1;

        /*
         * Set the initial value for the level to which the rx ring
         * should be filled, based on the max throughput and the
         * worst likely latency for the host to fill the rx ring
         * with new buffers. In theory, this fill level can be
         * dynamically adjusted from the initial value set here, to
         * reflect the actual host latency rather than a
         * conservative assumption about the host latency.
         */
        htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

        vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;

        tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                     (unsigned long)htt);

        skb_queue_head_init(&htt->tx_compl_q);
        skb_queue_head_init(&htt->rx_compl_q);

        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_fill_ring:
        ath10k_htt_rx_ring_free(htt);
        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);

err_netbuf:
        return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
                                         enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct rfc1042_hdr {
        u8 llc_dsap;
        u8 llc_ssap;
        u8 llc_ctrl;
        u8 snap_oui[3];
        __be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

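/* Layout note: each A-MSDU subframe starts with the 14 byte header above
 * (DA, SA, length), immediately followed by an rfc1042_hdr. This is why
 * ath10k_htt_rx_h_find_rfc1042() skips sizeof(struct amsdu_subframe_hdr)
 * when the MSDU is part of an A-MSDU.
 */
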
static const u8 rx_legacy_rate_idx[] = {
        3,      /* 0x00  - 11Mbps  */
        2,      /* 0x01  - 5.5Mbps */
        1,      /* 0x02  - 2Mbps   */
        0,      /* 0x03  - 1Mbps   */
        3,      /* 0x04  - 11Mbps  */
        2,      /* 0x05  - 5.5Mbps */
        1,      /* 0x06  - 2Mbps   */
        0,      /* 0x07  - 1Mbps   */
        10,     /* 0x08  - 48Mbps  */
        8,      /* 0x09  - 24Mbps  */
        6,      /* 0x0A  - 12Mbps  */
        4,      /* 0x0B  - 6Mbps   */
        11,     /* 0x0C  - 54Mbps  */
        9,      /* 0x0D  - 36Mbps  */
        7,      /* 0x0E  - 18Mbps  */
        5,      /* 0x0F  - 9Mbps   */
};

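/* Example lookups: the OFDM L-SIG rate code 0x0B maps to index 4 (6Mbps).
 * CCK frames on 2GHz report codes 0x08-0x0F as well; masking off BIT(3) in
 * ath10k_htt_rx_h_rates() turns e.g. 0x0B into 0x03, index 0 (1Mbps).
 */
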
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        enum ieee80211_band band;
        u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u32 info1, info2, info3;

        /* Band value can't be set as undefined but freq can be 0 - use that to
         * determine whether band is provided.
         *
         * FIXME: Perhaps this can go away if CCK rate reporting is a little
         *        more reliable.
         */
        if (!status->freq)
                return;

        band = status->band;
        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate_idx = 0;

                if (rate < 0x08 || rate > 0x0F)
                        break;

                switch (band) {
                case IEEE80211_BAND_2GHZ:
                        if (cck)
                                rate &= ~BIT(3);
                        rate_idx = rx_legacy_rate_idx[rate];
                        break;
                case IEEE80211_BAND_5GHZ:
                        rate_idx = rx_legacy_rate_idx[rate];
                        /* We are using the same rate table that was
                           registered with the HW - ath10k_rates[]. For
                           5GHz skip the CCK rates, hence -4 here */
                        rate_idx -= 4;
                        break;
                default:
                        break;
                }

                status->rate_idx = rate_idx;
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                   TODO check this */
                mcs = (info3 >> 4) & 0x0F;
                nss = ((info2 >> 10) & 0x07) + 1;
                bw = info2 & 3;
                sgi = info3 & 1;

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}

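/* Example HT-SIG decode for the handler above: info2 = 0x87 gives
 * mcs = 0x87 & 0x1f = 7 and bw = (0x87 >> 7) & 1 = 1, i.e. MCS7 on a
 * 40MHz channel; bit 7 of info3 would additionally flag a short GI.
 */
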
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without the
         * TSF. Is it worth holding frames until end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->vht_nss = 0;
                status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
                status->flag &= ~(RX_FLAG_HT |
                                  RX_FLAG_VHT |
                                  RX_FLAG_SHORT_GI |
                                  RX_FLAG_40MHZ |
                                  RX_FLAG_MACTIME_END);
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu)
                ath10k_htt_rx_h_mactime(ar, status, rxd);
}

/* TID 0-7 map to access categories per the standard 802.11 UP-to-AC rules. */
static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
        /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
        return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

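/* Example: a 3addr data header is 24 bytes and stays 24; a 4addr header is
 * 30 bytes and rounds up to 32, which is the padding this helper accounts
 * for.
 */
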
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This would
         * also make sense for software based decryption (which is not
         * implemented in ath10k).
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from tail
         * since hdr is used to compute some stuff.
         */

        hdr = (void *)msdu->data;

        /* Tail */
        skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

        /* MMIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

        /* Head */
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

        memmove((void *)msdu->data + crypto_len,
                (void *)msdu->data, hdr_len);
        skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header. Even if it's part
         * of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        hdr = (struct ieee80211_hdr *)msdu->data;
        hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, 4) +
                           round_up(crypto_len, 4);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        const u8 first_hdr[64],
                                        enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct ethhdr *eth;
        size_t hdr_len;
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
         * [payload]
         */

        rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
        if (WARN_ON_ONCE(!rfc1042))
                return;

        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
        ether_addr_copy(sa, eth->h_source);
        skb_pull(msdu, sizeof(struct ethhdr));

        /* push rfc1042/llc/snap */
        memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
               sizeof(struct rfc1042_hdr));

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
                                         struct sk_buff *msdu,
                                         struct ieee80211_rx_status *status,
                                         const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;

        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         */

        skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
                                    struct sk_buff *msdu,
                                    struct ieee80211_rx_status *status,
                                    u8 first_hdr[64],
                                    enum htt_rx_mpdu_encrypt_type enctype,
                                    bool is_decrypted)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;
        struct ieee80211_hdr *hdr;

        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
         * [crypto param] <-- padded to 4 bytes long
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         *
         * Other (2nd, 3rd, ..) msdu's decapped header:
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;
        decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        switch (decap) {
        case RX_MSDU_DECAP_RAW:
                ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
                                            is_decrypted);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
                                            enctype);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
                break;
        }
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

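/* Example outcome: a TCP/IPv4 frame with neither checksum-fail attention
 * bit set is reported as CHECKSUM_UNNECESSARY, letting the network stack
 * skip software checksum verification; anything the hardware did not
 * positively verify falls back to CHECKSUM_NONE.
 */
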
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status)
{
        struct sk_buff *first;
        struct sk_buff *last;
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum htt_rx_mpdu_encrypt_type enctype;
        u8 first_hdr[64];
        u8 *qos;
        size_t hdr_len;
        bool has_fcs_err;
        bool has_crypto_err;
        bool has_tkip_err;
        bool has_peer_idx_invalid;
        bool is_decrypted;
        u32 attention;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
         * decapped header. It'll be used for undecapping of each MSDU.
         */
        hdr = (void *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(first_hdr, hdr, hdr_len);

        /* Each A-MSDU subframe will use the original header as the base and be
         * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
         */
        hdr = (void *)first_hdr;
        qos = ieee80211_get_qos_ctl(hdr);
        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

        /* Some attention flags are valid only in the last MSDU. */
        last = skb_peek_tail(amsdu);
        rxd = (void *)last->data - sizeof(*rxd);
        attention = __le32_to_cpu(rxd->attention.flags);

        has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
        has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
        has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

        /* Note: If hardware captures an encrypted frame that it can't decrypt,
         * e.g. due to fcs error, missing peer or invalid key data it will
         * report the frame as raw.
         */
        is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
                        !has_fcs_err &&
                        !has_crypto_err &&
                        !has_peer_idx_invalid);

        /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
        status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
                          RX_FLAG_MMIC_ERROR |
                          RX_FLAG_DECRYPTED |
                          RX_FLAG_IV_STRIPPED |
                          RX_FLAG_MMIC_STRIPPED);

        if (has_fcs_err)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (has_tkip_err)
                status->flag |= RX_FLAG_MMIC_ERROR;

        if (is_decrypted)
                status->flag |= RX_FLAG_DECRYPTED |
                                RX_FLAG_IV_STRIPPED |
                                RX_FLAG_MMIC_STRIPPED;

        skb_queue_walk(amsdu, msdu) {
                ath10k_htt_rx_h_csum_offload(msdu);
                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
                                        is_decrypted);

                /* Undecapping involves copying the original 802.11 header back
                 * to sk_buff. If frame is protected and hardware has decrypted
                 * it then remove the protected bit.
                 */
                if (!is_decrypted)
                        continue;

                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        }
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
                                    struct sk_buff_head *amsdu,
                                    struct ieee80211_rx_status *status)
{
        struct sk_buff *msdu;

        while ((msdu = __skb_dequeue(amsdu))) {
                /* Setup per-MSDU flags */
                if (skb_queue_empty(amsdu))
                        status->flag &= ~RX_FLAG_AMSDU_MORE;
                else
                        status->flag |= RX_FLAG_AMSDU_MORE;

                ath10k_process_rx(ar, status, msdu);
        }
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
        struct sk_buff *skb, *first;
        int space;
        int total_len = 0;

        /* TODO: Might optimize this by using
         * skb_try_coalesce or similar method to
         * decrease copying, or maybe get mac80211 to
         * provide a way to just receive a list of
         * skb?
         */

        first = __skb_dequeue(amsdu);

        /* Allocate total length all at once. */
        skb_queue_walk(amsdu, skb)
                total_len += skb->len;

        space = total_len - skb_tailroom(first);
        if ((space > 0) &&
            (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
                /* TODO: bump some rx-oom error stat */
                /* put it back together so we can free the
                 * whole list at once.
                 */
                __skb_queue_head(amsdu, first);
                return -1;
        }

        /* Walk list again, copying contents into
         * msdu_head
         */
        while ((skb = __skb_dequeue(amsdu))) {
                skb_copy_from_linear_data(skb, skb_put(first, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
        }

        __skb_queue_head(amsdu, first);
        return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
                                    struct sk_buff_head *amsdu,
                                    bool chained)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (!chained)
                return;

        /* FIXME: Current unchaining logic can only handle simple case of raw
         * msdu chaining. If decapping is other than raw the chaining may be
         * more complex and this isn't handled by the current code. Don't even
         * try re-constructing such frames - it'll be pretty much garbage.
         */
        if (decap != RX_MSDU_DECAP_RAW ||
            skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
                __skb_queue_purge(amsdu);
                return;
        }

        ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                                        struct sk_buff_head *amsdu,
                                        struct ieee80211_rx_status *rx_status)
{
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;

        msdu = skb_peek(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);

        /* FIXME: It might be a good idea to do some fuzzy-testing to drop
         * invalid/dangerous frames.
         */

        if (!rx_status->freq) {
                ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
                return false;
        }

        /* Management frames are handled via WMI events. The advantage of this
         * approach is that the channel is explicitly provided in WMI events
         * whereas HTT doesn't provide channel information for Rxed frames.
         */
        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
                return false;
        }

        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
                return false;
        }

        return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
                                   struct sk_buff_head *amsdu,
                                   struct ieee80211_rx_status *rx_status)
{
        if (skb_queue_empty(amsdu))
                return;

        if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
                return;

        __skb_queue_purge(amsdu);
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct sk_buff_head amsdu;
        int num_mpdu_ranges;
        int fw_desc_len;
        u8 *fw_desc;
        int i, ret, mpdu_count = 0;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused)
                return;

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++)
                mpdu_count += mpdu_ranges[i].mpdu_count;

        while (mpdu_count--) {
                __skb_queue_head_init(&amsdu);
                ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
                                              &fw_desc_len, &amsdu);
                if (ret < 0) {
                        ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
                        __skb_queue_purge(&amsdu);
                        /* FIXME: It's probably a good idea to reboot the
                         * device instead of leaving it inoperable.
                         */
                        htt->rx_confused = true;
                        break;
                }

                ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
                ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
                ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
                ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
                ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
        }

        tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                       struct htt_rx_fragment_indication *frag)
{
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
        int ret;
        u8 *fw_desc;
        int fw_desc_len;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        __skb_queue_head_init(&amsdu);

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                      &amsdu);
        spin_unlock_bh(&htt->rx_ring.lock);

        tasklet_schedule(&htt->rx_replenish_task);

        ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (ret) {
                ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
                            ret);
                __skb_queue_purge(&amsdu);
                return;
        }

        if (skb_queue_len(&amsdu) != 1) {
                ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
                __skb_queue_purge(&amsdu);
                return;
        }

        ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

        if (fw_desc_len > 0) {
                ath10k_dbg(ar, ATH10K_DBG_HTT,
                           "expecting more fragmented rx in one indication %d\n",
                           fw_desc_len);
        }
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
                                       struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_done tx_done = {};
        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
        __le16 msdu_id;
        int i;

        lockdep_assert_held(&htt->tx_lock);

        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.no_ack = true;
                break;
        case HTT_DATA_TX_STATUS_OK:
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                tx_done.discard = true;
                break;
        default:
                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
                tx_done.discard = true;
                break;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                   resp->data_tx_completion.num_msdus);

        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                msdu_id = resp->data_tx_completion.msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);
                ath10k_txrx_tx_unref(htt, &tx_done);
        }
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_addba *ev = &resp->rx_addba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
                   tid, peer_id, ev->window_size);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
                   peer->addr, tid, ev->window_size);

        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_delba *ev = &resp->rx_delba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx delba tid %hu peer_id %hu\n",
                   tid, peer_id);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx stop rx ba session sta %pM tid %hu\n",
                   peer->addr, tid);

        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;

        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn(ar, "unaligned htt message, expect trouble\n");

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
        switch (resp->hdr.msg_type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
                complete(&htt->target_version_received);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
                spin_lock_bh(&htt->rx_ring.lock);
                __skb_queue_tail(&htt->rx_compl_q, skb);
                spin_unlock_bh(&htt->rx_ring.lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
                };
                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
                ath10k_peer_map_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
                struct htt_peer_unmap_event ev = {
                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
                };
                ath10k_peer_unmap_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

                tx_done.msdu_id =
                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
                        tx_done.discard = true;
                        break;
                }

                spin_lock_bh(&htt->tx_lock);
                ath10k_txrx_tx_unref(htt, &tx_done);
                spin_unlock_bh(&htt->tx_lock);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
                spin_lock_bh(&htt->tx_lock);
                __skb_queue_tail(&htt->tx_compl_q, skb);
                spin_unlock_bh(&htt->tx_lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;

                ath10k_dbg(ar, ATH10K_DBG_HTT,
                           "sec ind peer_id %d unicast %d type %d\n",
                           __le16_to_cpu(ev->peer_id),
                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
                           MS(ev->flags, HTT_SECURITY_TYPE));
                complete(&ar->install_key_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
                /* FIX THIS */
                break;
        case HTT_T2H_MSG_TYPE_STATS_CONF:
                trace_ath10k_htt_stats(ar, skb->data, skb->len);
                break;
        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
                /* Firmware can return tx frames if it's unable to fully
                 * process them and suspects host may be able to fix it. ath10k
                 * sends all tx frames as already inspected so this shouldn't
                 * happen unless fw has a bug.
                 */
                ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
                break;
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
                ath10k_htt_rx_addba(ar, resp);
                break;
        case HTT_T2H_MSG_TYPE_RX_DELBA:
                ath10k_htt_rx_delba(ar, resp);
                break;
        case HTT_T2H_MSG_TYPE_PKTLOG: {
                struct ath10k_pktlog_hdr *hdr =
                        (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

                trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
                                        sizeof(*hdr) +
                                        __le16_to_cpu(hdr->size));
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
                /* Ignore this event because mac80211 takes care of Rx
                 * aggregation reordering.
                 */
                break;
        }
        default:
                ath10k_warn(ar, "htt event (%d) not handled\n",
                            resp->hdr.msg_type);
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
        }

        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
        struct htt_resp *resp;
        struct sk_buff *skb;

        spin_lock_bh(&htt->tx_lock);
        while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
                dev_kfree_skb_any(skb);
        }
        spin_unlock_bh(&htt->tx_lock);

        spin_lock_bh(&htt->rx_ring.lock);
        while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
                resp = (struct htt_resp *)skb->data;
                ath10k_htt_rx_handler(htt, &resp->rx_ind);
                dev_kfree_skb_any(skb);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}