ath10k: decouple HTT TX completions
drivers/net/wireless/ath/ath10k/htt_rx.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps * 1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

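/*
 * Worked example of the sizing above (illustrative numbers only, not
 * taken from the driver): with max_throughput_mbps = 800 the formula
 * gives
 *
 *	800 * 1000 / (8 * 1000) * 20 = 2000 entries
 *
 * which lies within [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX] and
 * is then rounded up to the next power of two, 2048.
 */
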
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size = htt->max_throughput_mbps * 1000 /
	       (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

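/*
 * Continuing the illustrative 800 mbps example from the sizing
 * function: the fill level comes out as 800 * 1000 / (8 * 1000) * 10 =
 * 1000 buffers, i.e. roughly half of a 2048-entry ring is kept posted
 * while the full ring absorbs worst-case service latency. The cap at
 * size - 1 keeps a full ring distinguishable from an empty one.
 */
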
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

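/*
 * Example of the wrap-around arithmetic above (hypothetical values):
 * with a 512-entry ring (size_mask = 0x1ff), alloc_idx = 5 and
 * sw_rd_idx = 510, the element count is (5 - 510) & 0x1ff = 7. The
 * masked subtraction is only correct because the ring size is a power
 * of two, which ath10k_htt_rx_attach() verifies.
 */
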
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb = htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the MPDU is still delivered to
			 * the upper stack if it has no CRC error.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest a FW bug */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

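/*
 * A minimal sketch of how the callers below consume the skb chain
 * produced by ath10k_htt_rx_amsdu_pop() (error handling omitted; this
 * mirrors ath10k_htt_rx_handler() and ath10k_htt_rx_frag_handler()):
 *
 *	struct sk_buff *head = NULL, *tail = NULL, *skb;
 *	int chained;
 *
 *	chained = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
 *					  &head, &tail);
 *	if (!head)
 *		return;		// nothing was popped
 *	if (chained)
 *		...;		// an MSDU spanned multiple rx buffers
 *	for (skb = head; skb; skb = skb->next)
 *		...;		// walk the MSDUs of this (A-)MSDU
 */
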
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

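/*
 * Rough footprint of a successful attach, assuming the illustrative
 * 2048-entry ring from the sizing example above: one kmalloc'ed array
 * of 2048 skb pointers, one DMA-coherent ring of 2048 paddr entries
 * shared with the target, one coherent alloc_idx word, plus 2048 rx
 * buffers of HTT_RX_BUF_SIZE each posted by the initial fill.
 */
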
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

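/*
 * Taken together, the two helpers above describe the crypto framing of
 * a received MPDU, e.g. for TKIP (illustrative layout, not to scale):
 *
 *	[802.11 hdr][IV/params: 8][payload][MIC: 8][ICV: 4][FCS: 4]
 *
 * ath10k_htt_rx_crypto_param_len() covers the IV/params after the
 * header, ath10k_htt_rx_crypto_tail_len() the ICV ahead of the FCS;
 * the TKIP MIC on the last fragment is accounted for separately in
 * ath10k_htt_rx_frag_handler() below.
 */
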
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

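/*
 * The qc[0] & 0x80 test above checks bit 7 of the QoS control field,
 * the "A-MSDU present" flag from 802.11n. An equivalent spelling using
 * mac80211's named constant would be (sketch, not the code used here):
 *
 *	return ieee80211_is_data_qos(hdr->frame_control) &&
 *	       (*ieee80211_get_qos_ctl(hdr) &
 *		IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 */
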
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			       struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if (fmt == RX_MSDU_DECAP_RAW && skb->next) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */

			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		/* When fmt == RX_MSDU_DECAP_8023_SNAP_LLC:
		 *
		 * SNAP 802.3 consists of:
		 * [dst:6][src:6][len:2][dsap:1][ssap:1][ctl:1][snap:5]
		 * [data][fcs:4].
		 *
		 * Since this overlaps with A-MSDU header (da, sa, len)
		 * there's nothing extra to do. */

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4 bytes,
		 * but relative to the first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}

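/*
 * Sketch of the buffer assembled above for a two-subframe A-MSDU in
 * Native Wifi decap (illustrative, lengths not to scale):
 *
 *	[802.11 hdr][da:6 sa:6 len:2][subframe 1 data][pad]
 *	            [da:6 sa:6 len:2][subframe 2 data]
 *
 * The 14-byte da/sa/len triplets are restored from the rx descriptor
 * status area, and padding aligns each subframe to 4 bytes relative to
 * the first subframe.
 */
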
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		llclen = 8;
		llc  = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		int len = ieee80211_hdrlen(hdr->frame_control);
		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;
	return 0;
}

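/*
 * For Ethernet2 decap the function above effectively performs the
 * following conversion (illustrative; assumes the common 8-byte
 * LLC/SNAP header, which includes the 2-byte ethertype):
 *
 *	in:  [da:6][sa:6][ethertype:2][payload]
 *	out: [802.11 hdr][LLC/SNAP: 8][payload]
 *
 * by pulling the ethernet header and pushing back the LLC/SNAP and the
 * original 802.11 header preserved in the rx descriptor status area.
 */
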
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

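/*
 * Usage sketch (this is what the handlers below do): the checksum
 * state must be read while skb->data still points just past the rx
 * descriptor, before the skb is reallocated by the decap path, and is
 * then assigned to the skb that is finally delivered:
 *
 *	ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
 *	...
 *	info.skb->ip_summed = ip_summed;
 */
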
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;
	int ip_summed;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
								&fw_desc,
								&fw_desc_len,
								&msdu_head,
								&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames since these are handled via WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* The skb is not yet processed and may be
			 * reallocated. Since the checksum offload state is
			 * carried in the original skb, extract it now and
			 * assign it to the delivered skb later. */
			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

			info.skb     = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			info.skb->ip_summed = ip_summed;

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

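/*
 * Trailer arithmetic in the handler above for the last fragment of a
 * TKIP-protected MPDU (illustrative): trim = 4 (FCS) + 4 (TKIP ICV)
 * + 8 (Michael MIC) = 16 bytes removed from the tail, after the crypto
 * params have already been stripped from the front of the frame.
 */
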
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}