/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"
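
/* Complete a pending off-channel transmission. The completed skb is
 * matched against ar->offchan_tx_skb under ar->data_lock so that a late
 * completion for an older frame cannot signal ar->offchan_tx_completed
 * spuriously.
 */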
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb. */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn("completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);

out:
	spin_unlock_bh(&ar->data_lock);
}
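
/* Release an MSDU whose HTT tx completion has arrived: unmap its DMA
 * buffer, report tx status to mac80211 (or free the skb if the firmware
 * discarded it), and return the msdu_id to the pool. Callers must hold
 * htt->tx_lock.
 */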
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			  const struct htt_tx_done *tx_done)
{
	struct device *dev = htt->ar->dev;
	struct ieee80211_tx_info *info;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn("warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return;
	}

	msdu = htt->pending_tx[tx_done->msdu_id];
	skb_cb = ATH10K_SKB_CB(msdu);

	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (skb_cb->htt.txbuf)
		dma_pool_free(htt->tx_pool,
			      skb_cb->htt.txbuf,
			      skb_cb->htt.txbuf_paddr);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	if (tx_done->discard) {
		ieee80211_free_txskb(htt->ar->hw, msdu);
		goto exit;
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

exit:
	htt->pending_tx[tx_done->msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	__ath10k_htt_tx_dec_pending(htt);
	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}
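
/* Look up a peer by (vdev_id, MAC address). Caller must hold
 * ar->data_lock. Returns NULL if no matching peer exists.
 */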
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}
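
/* Look up a peer by firmware peer id. A single peer may own several ids,
 * tracked in its peer_ids bitmap. Caller must hold ar->data_lock.
 */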
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}
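
/* Sleep until a peer entry for (vdev_id, addr) appears in (or disappears
 * from) ar->peers, re-checking under ar->data_lock each time the peer
 * mapping waitqueue is woken. Returns -ETIMEDOUT if the expected state is
 * not reached in time.
 */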
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);
			mapped == expect_mapped;
		}), 3*HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
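
/* Convenience wrappers: block until the firmware has reported the peer as
 * created (mapped) or deleted (unmapped), respectively.
 */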
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
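
/* Handle an HTT peer map event: create the peer entry on first map of a
 * given (vdev_id, addr) pair and record the new firmware peer id in the
 * peer's peer_ids bitmap, waking any waiters on ar->peer_mapping_wq.
 */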
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		memcpy(peer->addr, ev->addr, ETH_ALEN);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	set_bit(ev->peer_id, peer->peer_ids);

exit:
	spin_unlock_bh(&ar->data_lock);
}
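
/* Handle an HTT peer unmap event: clear the unmapped id from the peer's
 * peer_ids bitmap and free the peer entry once its last id is gone,
 * waking any waiters on ar->peer_mapping_wq.
 */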
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn("peer-unmap-event: unknown peer id %d\n",
			    ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}