/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
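
/* Pending tx flow control: ath10k_htt_tx_inc_pending() stops the
 * mac80211 queues once the number of in-flight frames reaches
 * max_num_pending_tx, and __ath10k_htt_tx_dec_pending() wakes them
 * again as soon as a slot frees up. Callers of the double-underscore
 * variant must already hold htt->tx_lock.
 */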
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt);
        spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ieee80211_stop_queues(htt->ar->hw);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

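/* msdu ids index both the used_msdu_ids bitmap and the pending_tx
 * array. The id travels to the firmware in the tx descriptor
 * (cmd_tx.id / mgmt_tx.desc_id) and comes back in the tx completion,
 * where it is used to look up and release the original frame.
 */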
59 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
61 struct ath10k *ar = htt->ar;
64 lockdep_assert_held(&htt->tx_lock);
66 msdu_id = find_first_zero_bit(htt->used_msdu_ids,
67 htt->max_num_pending_tx);
68 if (msdu_id == htt->max_num_pending_tx)
71 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
72 __set_bit(msdu_id, htt->used_msdu_ids);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        if (!test_bit(msdu_id, htt->used_msdu_ids))
                ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
                            msdu_id);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
        __clear_bit(msdu_id, htt->used_msdu_ids);
}

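/* Allocates the per-msdu bookkeeping (the pending_tx pointer array and
 * the used_msdu_ids bitmap) plus a DMA pool of ath10k_htt_txbuf
 * descriptors, one of which is borrowed for each data frame in
 * ath10k_htt_tx().
 */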
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;

        spin_lock_init(&htt->tx_lock);
        init_waitqueue_head(&htt->empty_tx_wq);

        if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
                htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
        else
                htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
                                  htt->max_num_pending_tx, GFP_KERNEL);
        if (!htt->pending_tx)
                return -ENOMEM;

        htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
                                     BITS_TO_LONGS(htt->max_num_pending_tx),
                                     GFP_KERNEL);
        if (!htt->used_msdu_ids) {
                kfree(htt->pending_tx);
                return -ENOMEM;
        }

        htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf), 4, 0);
        if (!htt->tx_pool) {
                kfree(htt->used_msdu_ids);
                kfree(htt->pending_tx);
                return -ENOMEM;
        }

        return 0;
}

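/* On teardown any frames still in flight are completed locally with
 * the discard flag set; ath10k_txrx_tx_unref() is then expected to drop
 * the frame and release its DMA mapping and msdu id.
 */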
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct htt_tx_done tx_done = {0};
        int msdu_id;

        spin_lock_bh(&htt->tx_lock);
        for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
                if (!test_bit(msdu_id, htt->used_msdu_ids))
                        continue;

                ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
                           msdu_id);

                tx_done.discard = 1;
                tx_done.msdu_id = msdu_id;

                ath10k_txrx_tx_unref(htt, &tx_done);
        }
        spin_unlock_bh(&htt->tx_lock);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        ath10k_htt_tx_free_pending(htt);
        kfree(htt->pending_tx);
        kfree(htt->used_msdu_ids);
        dma_pool_destroy(htt->tx_pool);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

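/* The H2T helpers below share one pattern: allocate an HTC skb just big
 * enough for the command header plus the payload struct, fill it in and
 * pass it to ath10k_htc_send(). The skb is consumed on success; on
 * failure it still belongs to us, hence the dev_kfree_skb_any() in the
 * error paths.
 */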
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8 bit masks so no need to worry
         * about endian support */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
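
        /* The 64-bit cookie is opaque to the firmware and is echoed back
         * in the resulting stats event so replies can be matched to
         * requests. For example, cookie 0x1122334455667788 goes out as
         * cookie_lsb = 0x55667788 and cookie_msb = 0x11223344.
         */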
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
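
        /* Offsets within struct htt_rx_desc are reported to the firmware
         * in 4-byte words, hence the division by 4 in desc_offset().
         * E.g. a field placed 240 bytes into the descriptor would be
         * reported as 60 (the figure is illustrative; the real values
         * come from offsetof() at compile time).
         */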
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

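/* Configures how many subframes the firmware may aggregate. The bounds
 * checked below (1..64 subframes per A-MPDU, 1..31 per A-MSDU) mirror
 * the limits the firmware appears to accept; see also the defaults
 * noted in the function body.
 */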
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

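/* Pre-3.0 firmware uses a dedicated HTT_H2T_MSG_TYPE_MGMT_TX command
 * for management frames: the frame is DMA-mapped and its physical
 * address, length, vdev and msdu id are placed in the command, together
 * with the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the header. The
 * error labels unwind strictly in reverse order of acquisition.
 */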
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txdesc;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}

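/* Data tx path. Each frame borrows an ath10k_htt_txbuf from the DMA
 * pool; it holds the fragment list, HTC header and HTT command
 * back-to-back, so the whole descriptor goes out as one scatter-gather
 * item with the first prefetch_len bytes of the frame as the second.
 * The remainder of the frame is presumably fetched by the firmware via
 * the fragment pointer rather than pushed over the host interface.
 */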
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct htt_data_tx_desc_frag *frags;
        u8 vdev_id = skb_cb->vdev_id;
        u8 tid = skb_cb->htt.tid;
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        dma_addr_t paddr;
        u32 frags_paddr;
        bool use_frags;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        /* Since HTT 3.0 there is no separate mgmt tx command. However, for
         * mgmt tx via TX_FRM there is no tx fragment list; the host driver
         * passes the frame pointer directly instead. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);

        skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
                                           &paddr);
        if (!skb_cb->htt.txbuf) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }
        skb_cb->htt.txbuf_paddr = paddr;

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txbuf;

        if (likely(use_frags)) {
                frags = skb_cb->htt.txbuf->frags;

                frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
                frags[0].len = __cpu_to_le32(msdu->len);
                frags[1].paddr = 0;
                frags[1].len = 0;

                flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->htt.txbuf_paddr;
        } else {
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->paddr;
        }

        /* Normally all commands go through HTC, which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * The HTT endpoint is creditless so there's no need to care about
         * HTC flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event
         * is received. That's why the HTC tx completion handler itself is
         * ignored by setting transfer_context to NULL for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through the HTC tx
         * path as it's a waste of resources. By bypassing HTC it is possible
         * to avoid extra memory allocations, compress data structures and
         * thus improve performance. */

        skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
        skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
                        sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                        sizeof(skb_cb->htt.txbuf->cmd_tx) +
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;

        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so
         * force it to simply rely on a regular tx completion with discard
         * status. */
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

        skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
        skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);

        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
        sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
                            sizeof(skb_cb->htt.txbuf->frags);
        sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
        dma_pool_free(htt->tx_pool,
                      skb_cb->htt.txbuf,
                      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}