/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				      bool limit_mgmt_desc)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	spin_unlock_bh(&htt->tx_lock);
}
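
/* Usage sketch (illustrative comment, not part of the original file):
 * ath10k_htt_tx_inc_pending() below and the _dec_pending() helpers above
 * form a balanced pair around frame submission:
 *
 *	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
 *	if (res)
 *		goto err;
 *	...
 *	err_tx_dec:
 *		ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 *
 * This is exactly how ath10k_htt_mgmt_tx() and ath10k_htt_tx() below use
 * them; any new tx path should keep the pair balanced on every error path.
 */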
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
				     bool limit_mgmt_desc, bool is_probe_resp)
{
	struct ath10k *ar = htt->ar;
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	if (limit_mgmt_desc) {
		if (is_probe_resp && (htt->num_pending_mgmt_tx >
		    ar->hw_params.max_probe_resp_desc_thres)) {
			ret = -EBUSY;
			goto exit;
		}
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
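
/* Locking note (illustrative comment, not part of the original file): both
 * msdu_id helpers assert tx_lock via lockdep, so callers wrap them:
 *
 *	spin_lock_bh(&htt->tx_lock);
 *	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 *	spin_unlock_bh(&htt->tx_lock);
 *
 * The idr maps msdu_id -> skb so tx completions can look the frame up later
 * (see the ath10k_txrx_tx_unref() call in this file).
 */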
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_DMA);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_txbuf;
	}

skip_frag_desc_alloc:
	return 0;

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
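
/* Sizing note (illustrative comment, not part of the original file): both
 * DMA areas are plain arrays indexed by msdu_id, so the descriptors of a
 * given frame are found by simple arithmetic, e.g. in ath10k_htt_tx():
 *
 *	txbuf = &htt->txbuf.vaddr[msdu_id];
 *	txbuf_paddr = htt->txbuf.paddr +
 *		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);
 */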
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
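
/* Pattern note (illustrative comment, not part of the original file): the
 * H2T command helpers below all follow the same shape:
 *
 *	skb = ath10k_htc_alloc_skb(ar, len);	// alloc HTC-padded skb
 *	skb_put(skb, len);			// reserve the command body
 *	cmd = (struct htt_cmd *)skb->data;	// fill hdr.msg_type + payload
 *	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 *	if (ret)
 *		dev_kfree_skb_any(skb);		// skb is ours again on failure
 */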
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8-bit masks so there is no need to
	 * worry about endianness
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
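
/* Worked example (illustrative comment, not part of the original file):
 * for cookie = 0x1122334455667788ULL the request carries
 *
 *	cookie_lsb = cpu_to_le32(0x55667788)
 *	cookie_msb = cpu_to_le32(0x11223344)
 *
 * and the firmware echoes the cookie back in its stats response, letting
 * the host match the response to this request.
 */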
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
				__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
				__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
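
/* Addressing note (illustrative comment, not part of the original file):
 * with a single bank covering ids 0..max_num_pending_tx-1, the firmware
 * can locate the fragment descriptor of msdu_id N at
 *
 *	bank_base_addrs[0] + N * sizeof(struct htt_msdu_ext_desc)
 *
 * which matches how ath10k_htt_tx() computes frags_paddr on the host side.
 */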
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
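
/* Offset units (illustrative comment, not part of the original file):
 * desc_offset() divides byte offsets by 4 because the firmware expects rx
 * descriptor field offsets in 4-byte words; e.g. a field at byte offset
 * 0x40 inside struct htt_rx_desc is reported as 0x10.
 */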
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		return ar->scan.vdev_id;
	else if (cb->vif)
		return arvif->vdev_id;
	else if (ar->monitor_started)
		return ar->monitor_vdev_id;
	else
		return 0;
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
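
/* Example (illustrative comment, not part of the original file): a QoS
 * data frame with skb->priority = 5 maps to TID 5, management frames are
 * forced to HTT_DATA_TX_EXT_TID_MGMT, and everything else (non-QoS,
 * multicast/broadcast) falls back to the NON_QOS_MCAST_BCAST extension
 * TID.
 */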
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
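
/* Unwind order (illustrative comment, not part of the original file): the
 * error labels above release resources in reverse order of acquisition:
 * the DMA mapping, then the HTC tx descriptor, then the msdu_id, and
 * finally the pending-tx counter taken at function entry.
 */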
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * The HTT endpoint is creditless so there's no need to care about
	 * HTC flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is
	 * ignored by setting transfer_context to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through the HTC tx
	 * path as it's a waste of resources. By bypassing HTC it is possible
	 * to avoid extra memory allocations, compress data structures and
	 * thus improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
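
/* Caller sketch (illustrative, not part of the original file; the exact
 * call site lives in mac.c): mac.c picks a txmode from
 * enum ath10k_hw_txrx_mode (RAW, NATIVE_WIFI, ETHERNET or MGMT) based on
 * vif and firmware capabilities and then hands the frame over, roughly:
 *
 *	ret = ath10k_htt_tx(&ar->htt, txmode, skb);
 *	if (ret)
 *		ieee80211_free_txskb(ar->hw, skb);
 */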