/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
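
/*
 * Example: for tid_mux = 0x35 the helpers above decode
 * tid = 0x35 & ATH6KL_TID_MASK = 5 and aid = 0x35 >> ATH6KL_AID_SHIFT = 3.
 */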

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
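
/*
 * Note: *map_no is 1-based (node_map index + 1), with 0 meaning "no
 * mapping"; ath6kl_tx_clear_node_map() relies on this convention when it
 * decrements map_no before indexing node_map.
 */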

static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format  */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}
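
/*
 * Both ath6kl_process_uapsdq() and ath6kl_process_psq() return true once
 * the skb has been parked on a power-save queue (i.e. ownership has been
 * taken); ath6kl_powersave_ap() propagates that, so ath6kl_data_tx() must
 * neither free nor transmit the frame in that case.
 */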

static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry
			 * q it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb,
							       flags);
		}
	}

	return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
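
/*
 * Control frames are tagged ATH6KL_CONTROL_PKT_TAG above so that
 * ath6kl_tx_data_cleanup() can flush only ATH6KL_DATA_PKT_TAG traffic and
 * ath6kl_tx_queue_full() can exempt control packets from its drop policy.
 */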

int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99 ; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n"
				    , ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant the host should not be exhausting the
		 * WMI queue with too many commands the only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed  */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
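
/*
 * The htc_packet control block above is overlaid at skb->head rather than
 * allocated separately; ath6kl_buf_alloc() is expected to have reserved
 * enough headroom for it in front of the 4-byte-aligned data area.
 */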

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
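
/*
 * A-MSDU subframe layout assumed by the slicer above: an 802.3-style
 * header whose h_proto field carries the subframe payload length (not an
 * ethertype), followed by the payload and padding to a 4-byte boundary.
 * Hence the bounds check on payload_8023_len and the ALIGN() before
 * advancing framep to the next subframe.
 */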

static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An imp point : if win_sz is 7, for seq_no space of 4095,
	 * then, there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
	 * 2, 4, 8, 16 win_sz works fine).
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
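
/*
 * Illustrative window arithmetic, assuming AGGR_WIN_IDX(seq, sz) reduces
 * to seq % sz: with hold_q_sz = 14 (TID_WINDOW_SZ for win_sz 7) and
 * seq_next = 4094, successive indices are 4094 % 14 = 6, 4095 % 14 = 7,
 * then 0 % 14 = 0 once the 12-bit sequence space wraps, skipping indices
 * 8..13; these are the "holes" the comment in aggr_deque_frms() warns
 * about for win_sz values like 7.
 */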

static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}
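
/*
 * hold_q is sized TID_WINDOW_SZ(win_sz) entries (2 x win_sz, allocated in
 * aggr_recv_addba_req_evt()), which is why the duplicate/out-of-window
 * comment above can treat anything up to one full window beyond the
 * current one as parkable before the window is force-shifted.
 */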

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request
	 * If it is zero, send all frames
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}

	return;
}

void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    Clear the PVB for the STA.
		 * 2. If Awake-->Sleep, Starting queueing frames
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest))
		vif->net_stats.multicast++;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}