/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

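/* assumed average rx frame size when converting a throughput figure
 * into a buffer count */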
#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to
         * service the rx indication from one A-MPDU before the rx
         * indication from the subsequent A-MPDU happens, roughly 1-2 ms
         * later. However, the rx ring should be sized very conservatively,
         * to accommodate the worst reasonable delay before the host CPU
         * services a rx indication interrupt.
         *
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         *
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use. Thus, the ring itself should be sized very
         * conservatively, while the degree to which the ring is filled
         * with empty buffers should be sized moderately conservatively.
         */

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000  /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
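        /* e.g. for max_throughput_mbps = 800 this yields
         * 800 * 1000 / (8 * 1000) * 20 = 2000 buffers, which the code
         * below clamps and rounds up to 2048 */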

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;

        if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = roundup_pow_of_two(size);

        return size;
}

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
        int size;

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000  /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
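        /* e.g. for max_throughput_mbps = 800 this yields
         * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers */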

        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
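        /* with one slot always unused, a shared read/write index pair
         * can never make a full ring look identical to an empty one */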
        if (size >= htt->rx_ring.size)
                size = htt->rx_ring.size - 1;

        return size;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_cb *cb;
        int i;

        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                cb = ATH10K_SKB_CB(skb);
                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                ATH10K_SKB_CB(skb)->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                num--;
                idx++;
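                /* the ring size is a power of 2, so the mask simply
                 * wraps the index */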
                idx &= htt->rx_ring.size_mask;
        }

fail:
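        /* the alloc index is updated even on failure, so the buffers
         * posted so far are still accounted for */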
        *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is that RX may take up a significant amount of CPU cycles
         * and starve other tasks, e.g. TX on an ethernet device while acting
         * as a bridge with the ath10k wlan interface. This ended up with very
         * poor performance once the host system's CPU was overwhelmed with RX
         * on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets are
         * processed in FIFO order. This means actual RX processing can starve
         * out refilling. If there aren't enough buffers on the RX ring the FW
         * will not report RX until the ring is refilled with enough buffers.
         * This automatically balances load wrt CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability. */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                tasklet_schedule(&htt->rx_replenish_task);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;
        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < htt->rx_ring.size; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                if (!skb)
                        continue;

                dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                htt->rx_ring.netbufs_ring[i] = NULL;
        }
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->rx_replenish_task);
        tasklet_kill(&htt->txrx_compl_task);

        skb_queue_purge(&htt->tx_compl_q);
        skb_queue_purge(&htt->rx_compl_q);

        ath10k_htt_rx_ring_clean_up(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                dev_kfree_skb_any(skb);
                skb = next;
        }
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
                                   struct sk_buff **tail_msdu,
                                   u32 *attention)
{
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
                return -1;
        }

        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
        while (msdu) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                dma_unmap_single(htt->ar->dev,
                                 ATH10K_SKB_CB(msdu)->paddr,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);

                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
                                msdu->data, msdu->len + skb_tailroom(msdu));

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 *        expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        ath10k_htt_rx_free_msdu_chain(*head_msdu);
                        *head_msdu = NULL;
                        msdu = NULL;
                        ath10k_err("htt rx stopped. cannot recover\n");
                        htt->rx_confused = true;
                        break;
                }

                *attention |= __le32_to_cpu(rx_desc->attention.flags) &
                                            (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
                                             RX_ATTENTION_FLAGS_DECRYPT_ERR |
                                             RX_ATTENTION_FLAGS_FCS_ERR |
                                             RX_ATTENTION_FLAGS_MGMT_TYPE);
                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will
                         * lose some of the MSDU status - in this case,
                         * the FW descriptors provided will be fewer than
                         * the actual MSDUs inside this MPDU. Mark the FW
                         * descriptors so that they will still be delivered
                         * to the upper stack, if there is no CRC error for
                         * this MPDU.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs at the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;
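                /* any remainder of msdu_len is carried by the chained
                 * ring2 buffers popped below */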

                /* FIXME: Do chained buffers include htt_rx_desc or not? */
                while (msdu_chained--) {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

                        dma_unmap_single(htt->ar->dev,
                                         ATH10K_SKB_CB(next)->paddr,
                                         next->len + skb_tailroom(next),
                                         DMA_FROM_DEVICE);

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
                                        "htt rx chained: ", next->data,
                                        next->len + skb_tailroom(next));

                        skb_trim(next, 0);
                        skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= next->len;

                        msdu->next = next;
                        msdu = next;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                if (last_msdu) {
                        msdu->next = NULL;
                        break;
                } else {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
                        msdu->next = next;
                        msdu = next;
                }
        }
        *tail_msdu = msdu;

        if (*head_msdu == NULL)
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        dma_addr_t paddr;
        void *vaddr;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn("htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.size_mask = htt->rx_ring.size - 1;

        /*
         * Set the initial value for the level to which the rx ring
         * should be filled, based on the max throughput and the
         * worst likely latency for the host to fill the rx ring
         * with new buffers. In theory, this fill level can be
         * dynamically adjusted from the initial value set here, to
         * reflect the actual host latency rather than a
         * conservative assumption about the host latency.
         */
        htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;

        tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                     (unsigned long)htt);

        skb_queue_head_init(&htt->tx_compl_q);
        skb_queue_head_init(&htt->rx_compl_q);

        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);

        ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_fill_ring:
        ath10k_htt_rx_ring_free(htt);
        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

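/* Length of the per-MPDU security header (IV/PN) the cipher inserts
 * after the 802.11 header, e.g. 4 bytes for the WEP IV and 8 bytes
 * for the TKIP/CCMP IV. */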
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
        case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}

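/* Length of the cipher trailer still present on the MPDU: the 4-byte
 * TKIP ICV or the 8-byte CCMP MIC. WEP reports 0, presumably because
 * its ICV has already been stripped. */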
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt == RX_MSDU_DECAP_RAW)
                return (void *)skb->data;
        else
                return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
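                /* bit 7 of the QoS control field is the A-MSDU present
                 * bit (IEEE80211_QOS_CTL_A_MSDU_PRESENT) */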
                if (qc[0] & 0x80)
                        return true;
        }
        return false;
}

struct rfc1042_hdr {
        u8 llc_dsap;
        u8 llc_ssap;
        u8 llc_ctrl;
        u8 snap_oui[3];
        __be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

static const u8 rx_legacy_rate_idx[] = {
        3,      /* 0x00  - 11Mbps  */
        2,      /* 0x01  - 5.5Mbps */
        1,      /* 0x02  - 2Mbps   */
        0,      /* 0x03  - 1Mbps   */
        3,      /* 0x04  - 11Mbps  */
        2,      /* 0x05  - 5.5Mbps */
        1,      /* 0x06  - 2Mbps   */
        0,      /* 0x07  - 1Mbps   */
        10,     /* 0x08  - 48Mbps  */
        8,      /* 0x09  - 24Mbps  */
        6,      /* 0x0A  - 12Mbps  */
        4,      /* 0x0B  - 6Mbps   */
        11,     /* 0x0C  - 54Mbps  */
        9,      /* 0x0D  - 36Mbps  */
        7,      /* 0x0E  - 18Mbps  */
        5,      /* 0x0F  - 9Mbps   */
};

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  enum ieee80211_band band,
                                  u8 info0, u32 info1, u32 info2,
                                  struct ieee80211_rx_status *status)
{
        u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
        u8 preamble = 0;

        /* Check if valid fields */
        if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
                return;

        preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
                rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
                rate_idx = 0;

                if (rate < 0x08 || rate > 0x0F)
                        break;

                switch (band) {
                case IEEE80211_BAND_2GHZ:
                        if (cck)
                                rate &= ~BIT(3);
                        rate_idx = rx_legacy_rate_idx[rate];
                        break;
                case IEEE80211_BAND_5GHZ:
                        rate_idx = rx_legacy_rate_idx[rate];
                        /* We are using the same rate table that we
                           registered with the HW - ath10k_rates[]. There
                           are no CCK rates on 5GHz, so subtract 4 */
                        rate_idx -= 4;
                        break;
                default:
                        break;
                }

                status->rate_idx = rate_idx;
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info1 and info2 */
                mcs = info1 & 0x1F;
                nss = mcs >> 3;
                bw = (info1 >> 7) & 1;
                sgi = (info2 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
                   TODO check this */
                mcs = (info2 >> 4) & 0x0F;
                nss = ((info1 >> 10) & 0x07) + 1;
                bw = info1 & 3;
                sgi = info2 & 1;

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}

static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
                                      struct ieee80211_rx_status *rx_status,
                                      struct sk_buff *skb,
                                      enum htt_rx_mpdu_encrypt_type enctype,
                                      enum rx_msdu_decap_format fmt,
                                      bool dot11frag)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        rx_status->flag &= ~(RX_FLAG_DECRYPTED |
                             RX_FLAG_IV_STRIPPED |
                             RX_FLAG_MMIC_STRIPPED);

        if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
                return;

        /*
         * There's no explicit rx descriptor flag to indicate whether a given
         * frame has been decrypted or not. We're forced to use the decap
         * format as an implicit indication. However, fragmented rx is always
         * raw and it probably never reports undecrypted raw frames.
         *
         * This makes sure sniffed frames are reported as-is without stripping
         * the protected flag.
         */
        if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
                return;

        rx_status->flag |= RX_FLAG_DECRYPTED |
                           RX_FLAG_IV_STRIPPED |
                           RX_FLAG_MMIC_STRIPPED;
        hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
                                           ~IEEE80211_FCTL_PROTECTED);
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ATH10K_DBG_DATA,
832                    "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
                   skb,
                   skb->len,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR));
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);

        ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
        /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
        return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                                struct ieee80211_rx_status *rx_status,
                                struct sk_buff *skb_in)
{
        struct htt_rx_desc *rxd;
        struct sk_buff *skb = skb_in;
        struct sk_buff *first;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct ieee80211_hdr *hdr;
        u8 hdr_buf[64], addr[ETH_ALEN], *qos;
        unsigned int hdr_len;

        rxd = (void *)skb->data - sizeof(*rxd);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(hdr_buf, hdr, hdr_len);
        hdr = (struct ieee80211_hdr *)hdr_buf;

        first = skb;
        while (skb) {
                void *decap_hdr;
                int len;

                rxd = (void *)skb->data - sizeof(*rxd);
                fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                         RX_MSDU_START_INFO1_DECAP_FORMAT);
                decap_hdr = (void *)rxd->rx_hdr_status;

                skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

                /* First frame in an A-MSDU chain has more decapped data. */
                if (skb == first) {
                        len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
                        len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
                                        4);
                        decap_hdr += len;
                }

                switch (fmt) {
                case RX_MSDU_DECAP_RAW:
                        /* remove trailing FCS */
                        skb_trim(skb, skb->len - FCS_LEN);
                        break;
                case RX_MSDU_DECAP_NATIVE_WIFI:
                        /* pull decapped header and copy DA */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                        memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
                        skb_pull(skb, hdr_len);

                        /* push original 802.11 header */
                        hdr = (struct ieee80211_hdr *)hdr_buf;
                        hdr_len = ieee80211_hdrlen(hdr->frame_control);
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

                        /* original A-MSDU header has the bit set but we're
                         * not including A-MSDU subframe header */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        qos = ieee80211_get_qos_ctl(hdr);
                        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

                        /* original 802.11 header has a different DA */
                        memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
                        break;
                case RX_MSDU_DECAP_ETHERNET2_DIX:
                        /* strip ethernet header and insert decapped 802.11
                         * header, amsdu subframe header and rfc1042 header */

                        len = 0;
                        len += sizeof(struct rfc1042_hdr);
                        len += sizeof(struct amsdu_subframe_hdr);

                        skb_pull(skb, sizeof(struct ethhdr));
                        memcpy(skb_push(skb, len), decap_hdr, len);
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                        break;
                case RX_MSDU_DECAP_8023_SNAP_LLC:
                        /* insert decapped 802.11 header making a single
                         * A-MSDU */
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                        break;
                }

                skb_in = skb;
                ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
                                          false);
                skb = skb->next;
                skb_in->next = NULL;

                if (skb)
                        rx_status->flag |= RX_FLAG_AMSDU_MORE;
                else
                        rx_status->flag &= ~RX_FLAG_AMSDU_MORE;

                ath10k_process_rx(htt->ar, rx_status, skb_in);
        }

        /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
         * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
                               struct ieee80211_rx_status *rx_status,
                               struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        int hdr_len;
        void *rfc1042;

        /* This shouldn't happen. If it does, then it may be a FW bug. */
        if (skb->next) {
                ath10k_warn("htt rx received chained non A-MSDU frame\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
        }

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);

        skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

        switch (fmt) {
        case RX_MSDU_DECAP_RAW:
                /* remove trailing FCS */
                skb_trim(skb, skb->len - FCS_LEN);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                /* Pull decapped header */
                hdr = (struct ieee80211_hdr *)skb->data;
                hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                skb_pull(skb, hdr_len);

                /* Push original header */
                hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                /* strip ethernet header and insert decapped 802.11 header and
                 * rfc1042 header */

                rfc1042 = hdr;
                rfc1042 += roundup(hdr_len, 4);
                rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

                skb_pull(skb, sizeof(struct ethhdr));
                memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
                       rfc1042, sizeof(struct rfc1042_hdr));
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                /* remove A-MSDU subframe header and insert
                 * decapped 802.11 header. rfc1042 header is already there */

                skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        }

        ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);

        ath10k_process_rx(htt->ar, rx_status, skb);
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
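        /* report CHECKSUM_UNNECESSARY only when the HW validated both
         * the IP and the TCP/UDP checksum; otherwise leave verification
         * to the network stack */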

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
        struct sk_buff *next = msdu_head->next;
        struct sk_buff *to_free = next;
        int space;
        int total_len = 0;

        /* TODO: We might be able to optimize this by using
         * skb_try_coalesce or a similar method to
         * decrease copying, or maybe get mac80211 to
         * provide a way to just receive a list of
         * skbs?
         */

        msdu_head->next = NULL;

        /* Allocate total length all at once. */
        while (next) {
                total_len += next->len;
                next = next->next;
        }

        space = total_len - skb_tailroom(msdu_head);
        if ((space > 0) &&
            (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
                /* TODO:  bump some rx-oom error stat */
                /* put it back together so we can free the
                 * whole list at once.
                 */
                msdu_head->next = to_free;
                return -1;
        }

        /* Walk list again, copying contents into
         * msdu_head
         */
        next = to_free;
        while (next) {
                skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
                                          next->len);
                next = next->next;
        }

        /* If we get here, we have a consolidated skb. Free the
         * fragments and pass the main skb on up the
         * stack.
         */
        ath10k_htt_rx_free_msdu_chain(to_free);
        return 0;
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
                                        struct sk_buff *head,
                                        enum htt_rx_mpdu_status status,
                                        bool channel_set,
                                        u32 attention)
{
        if (head->len == 0) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx dropping due to zero-len\n");
                return false;
        }

        if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx dropping due to decrypt-err\n");
                return false;
        }

        if (!channel_set) {
                ath10k_warn("no channel configured; ignoring frame!\n");
                return false;
        }

        /* Skip mgmt frames while we handle this in WMI */
        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
            attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
                ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
                return false;
        }

        if (status != HTT_RX_IND_MPDU_STATUS_OK &&
            status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
            status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
            !htt->ar->monitor_started) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx ignoring frame w/ status %d\n",
                           status);
                return false;
        }

        if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx CAC running\n");
                return false;
        }

        return true;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct htt_rx_desc *rxd;
        enum htt_rx_mpdu_status status;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
        u32 attention;
        int fw_desc_len;
        u8 *fw_desc;
        bool channel_set;
        int i, j;
        int ret;

        lockdep_assert_held(&htt->rx_ring.lock);

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        /* Fill this once, while this is per-ppdu */
        if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
                memset(rx_status, 0, sizeof(*rx_status));
                rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
                                     rx->ppdu.combined_rssi;
        }

        if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
                /* TSF available only in 32-bit */
                rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
                rx_status->flag |= RX_FLAG_MACTIME_END;
        }

        channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

        if (channel_set) {
                ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
                                      rx->ppdu.info0,
                                      __le32_to_cpu(rx->ppdu.info1),
                                      __le32_to_cpu(rx->ppdu.info2),
                                      rx_status);
        }

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++) {
                status = mpdu_ranges[i].mpdu_range_status;

                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;

                        attention = 0;
                        msdu_head = NULL;
                        msdu_tail = NULL;
                        ret = ath10k_htt_rx_amsdu_pop(htt,
                                                      &fw_desc,
                                                      &fw_desc_len,
                                                      &msdu_head,
                                                      &msdu_tail,
                                                      &attention);

                        if (ret < 0) {
                                ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
                                            ret);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        rxd = container_of((void *)msdu_head->data,
                                           struct htt_rx_desc,
                                           msdu_payload);

                        if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
                                                         status,
                                                         channel_set,
                                                         attention)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (ret > 0 &&
                            ath10k_unchain_msdu(msdu_head) < 0) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
                                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
                        else
                                rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

                        if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
                                rx_status->flag |= RX_FLAG_MMIC_ERROR;
                        else
                                rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
                                ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
                        else
                                ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
                }
        }

        tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                struct htt_rx_fragment_indication *frag)
{
        struct sk_buff *msdu_head, *msdu_tail;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct ieee80211_hdr *hdr;
        int ret;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
        int fw_desc_len, hdrlen, paramlen;
        int trim;
        u32 attention = 0;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        msdu_head = NULL;
        msdu_tail = NULL;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                      &msdu_head, &msdu_tail,
                                      &attention);
        spin_unlock_bh(&htt->rx_ring.lock);

        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (ret) {
1314                 ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
                            ret);
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }

        /* FIXME: implement signal strength */
        rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

        hdr = (struct ieee80211_hdr *)msdu_head->data;
        rxd = (void *)msdu_head->data - sizeof(*rxd);
        tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt != RX_MSDU_DECAP_RAW) {
1331                 ath10k_warn("we dont support non-raw fragmented rx yet\n");
1332                 dev_kfree_skb_any(msdu_head);
1333                 goto end;
1334         }
1335
1336         enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1337                      RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1338         ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
1339                                   true);
1340         msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
1341
1342         if (tkip_mic_err)
1343                 ath10k_warn("tkip mic error\n");
1344
1345         if (decrypt_err) {
1346                 ath10k_warn("decryption err in fragmented rx\n");
1347                 dev_kfree_skb_any(msdu_head);
1348                 goto end;
1349         }
1350
1351         if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
1352                 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1353                 paramlen = ath10k_htt_rx_crypto_param_len(enctype);
1354
1355                 /* It is more efficient to move the header than the payload */
		memmove((void *)msdu_head->data + paramlen,
			(void *)msdu_head->data,
			hdrlen);
		skb_pull(msdu_head, paramlen);
		hdr = (struct ieee80211_hdr *)msdu_head->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(enctype);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;
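	/* e.g. the last fragment of a TKIP-protected MPDU sheds
	 * 4 (FCS) + 4 (ICV) + 8 (Michael MIC) = 16 trailing bytes.
	 */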

	if (trim > msdu_head->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	skb_trim(msdu_head, msdu_head->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			msdu_head->data, msdu_head->len);
	ath10k_process_rx(htt->ar, rx_status, msdu_head);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
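
/* A minimal illustrative sketch, not called anywhere in the driver: the
 * trailer-trim logic above can be read as a pure function of the header
 * and cipher. It leans only on symbols already used in this file; the
 * helper name itself is made up for illustration.
 */
static inline int
ath10k_htt_rx_frag_trailer_len(struct ieee80211_hdr *hdr,
			       enum htt_rx_mpdu_encrypt_type enctype)
{
	int trim = 4; /* FCS is always present on raw-decap frames */

	/* cipher-specific trailer (e.g. ICV) */
	trim += ath10k_htt_rx_crypto_tail_len(enctype);

	/* the last TKIP fragment also carries the 8-byte Michael MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	return trim;
}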

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn("unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

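	/* The completion record is followed by num_msdus little-endian
	 * MSDU ids; release the tx descriptor behind each of them.
	 */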
	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

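	/* The TID and peer id share the 16-bit info0 word; MS() masks
	 * out and right-shifts the named bitfield.
	 */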
	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn("received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn("received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn("received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn("received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%02x\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
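		/* The skb is handed over to the rx completion queue and
		 * freed later by the txrx tasklet, hence the return
		 * rather than break: the common dev_kfree_skb_any() at
		 * the end of this function must be skipped.
		 */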
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
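		/* As with RX_IND: ownership of the skb passes to
		 * tx_compl_q, so return instead of falling through to
		 * the final free.
		 */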
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn("received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
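
/* A minimal sketch, not part of this file: the completion tasklet above
 * follows the classic tasklet_init() pattern, and its binding is assumed
 * to live in the htt rx attach path along the lines of:
 *
 *	skb_queue_head_init(&htt->tx_compl_q);
 *	skb_queue_head_init(&htt->rx_compl_q);
 *	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
 *		     (unsigned long)htt);
 */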