/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

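/* Encode a queue byte count into the 8-bit factor/exponent format used by
 * the firmware's tx queue state entries: roughly
 * count ~= factor << (7 + 3 * exp), saturating at 0xff. The two parts are
 * packed with SM() into the HTT_TX_Q_STATE_ENTRY_{FACTOR,EXP} fields.
 */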
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

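/* Update the host-shared tx queue state entry for the txq's peer/tid pair.
 * This state is only meaningful in push-pull mode, where the firmware pulls
 * frames based on the advertised queue depths. Callers must hold
 * htt.tx_lock and make the change visible to the device with
 * __ath10k_htt_tx_txq_sync().
 */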
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta)
		peer_id = arsta->peer_id;
	else
		peer_id = arvif->peer_id;

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

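/* The locked wrappers below are the entry points used from the mac80211 tx
 * queue hooks: recalc updates a single txq's entry, sync commits the whole
 * state to the device, and update does both under one lock acquisition.
 */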
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

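/* num_pending_tx counts frames handed to the firmware but not yet
 * completed. When it reaches max_num_pending_tx the mac80211 data queues
 * are stopped (ATH10K_TX_PAUSE_Q_FULL) and woken again once the counter
 * drops back below the limit.
 */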
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

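/* msdu ids are allocated from an IDR so that a tx completion indication
 * from the firmware can be mapped back to the in-flight skb. Both helpers
 * below require htt->tx_lock to be held.
 */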
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

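/* The continuous fragment descriptor bank is a DMA-coherent array of
 * struct htt_msdu_ext_desc indexed by msdu id. Firmware that advertises
 * continuous_frag_desc reads a frame's scatter list from this bank instead
 * of from the per-frame tx descriptor.
 */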
static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL);
	if (!htt->frag_desc.vaddr) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	return 0;
}

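/* The tx queue state is a host-resident buffer that firmware with the peer
 * flow control feature reads over DMA to learn per-peer/per-tid queue
 * depths. It is streaming-mapped (dma_map_single) rather than coherent, so
 * updates must be pushed out with dma_sync_single_for_device().
 */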
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

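/* idr_for_each() callback used during teardown: any msdu still in the
 * pending IDR is completed with DISCARD status so its resources (dma
 * mapping, skb, msdu id) are released through the normal unref path.
 */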
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

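/* Send the HTT version request. This is issued early during HTT setup; the
 * firmware's version reply is handled in the HTT rx path.
 */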
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

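/* Tell the firmware where the fragment descriptor bank lives and, when the
 * peer flow control feature is in use, where the host tx queue state can
 * be read from. This is sent during HTT setup.
 */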
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

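/* Configure the low-latency rx ring: pass the ring's base address, the
 * shadow index register location and the offsets (in 4-byte words) of the
 * individual sections within struct htt_rx_desc, so the firmware fills rx
 * descriptors in the layout the host expects.
 */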
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

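/* Program the maximum number of subframes the firmware may aggregate into
 * a single A-MPDU and A-MSDU. Zero is rejected, and the upper bounds below
 * mirror what this command accepts.
 */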
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

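/* In push-pull mode the firmware sends tx fetch indications asking the
 * host what it intends to transmit; this builds the matching fetch
 * response carrying the (possibly trimmed) record list back to firmware.
 */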
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

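/* Pick the vdev a frame should be transmitted on: off-channel frames go to
 * the scan vdev, frames with a vif use its vdev, and frames injected while
 * monitoring fall back to the monitor vdev.
 */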
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		return ar->scan.vdev_id;
	else if (cb->vif)
		return arvif->vdev_id;
	else if (ar->monitor_started)
		return ar->monitor_vdev_id;
	else
		return 0;
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

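/* Transmit a management frame via the dedicated HTT mgmt tx command. The
 * frame is DMA-mapped and referenced by address; additionally the first
 * HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of the header are copied inline into
 * the command for the firmware to inspect.
 */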
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

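/* Data tx path. The HTT TX_FRM command is assembled in a pre-allocated,
 * DMA-coherent ath10k_htt_txbuf slot (indexed by msdu id) and handed to
 * the HIF layer as a two-element scatter list: the descriptor followed by
 * the first prefetch_len bytes of the frame; the remainder is read by the
 * device via DMA through the fragment list.
 */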
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}