+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ INIT_LIST_HEAD(&artxq->list);
+}
+
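+/* Drop the txq from the push scheduler and clear references to it held
+ * by frames still awaiting tx completion.
+ */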
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+
+ spin_lock_bh(&ar->txqs_lock);
+ if (!list_empty(&artxq->list))
+ list_del_init(&artxq->list);
+ spin_unlock_bh(&ar->txqs_lock);
+
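+ /* Frames already handed to the firmware keep a txq pointer in their
+  * cb; clear it so a late tx completion cannot touch a freed queue.
+  */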
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
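+/* Map a firmware peer_id/tid pair back to the mac80211 txq it belongs
+ * to. Caller must hold ar->data_lock, which guards the peer map.
+ */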
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
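+ /* Station peers have per-TID queues; otherwise fall back to the
+  * per-vif queue.
+  */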
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No locks needed: these are racy snapshot reads, and the worst
+  * case is a frame pushed or deferred one scheduling round too many.
+  */
+
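+ /* In full-push mode the host may always push */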
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
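+ /* Still below the global push allowance */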
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
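+ /* Still below this queue's own push allowance */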
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
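+/* Push a single frame from @txq to the hardware. Returns the frame
+ * length on success or a negative errno; -ENOENT means the queue was
+ * empty.
+ */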
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ size_t skb_len;
+ int ret;
+
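+ /* Reserve a pending-tx slot up front; every failure path below
+  * releases it again.
+  */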
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
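+ /* Stash the vif/txq pointers in the skb cb for the tx completion path */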
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
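+ /* Save the length now; the skb may already be freed once
+  * ath10k_mac_tx() returns.
+  */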
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
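+/* Service the scheduled txqs round-robin, pushing frames for as long as
+ * the firmware queues have room.
+ */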
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ struct ath10k_txq *last;
+ int ret;
+ int max;
+
+ spin_lock_bh(&ar->txqs_lock);
+ rcu_read_lock();
+
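+ /* Snapshot the tail so a single pass suffices; queues are re-added
+  * below and would otherwise keep the loop spinning.
+  */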
+ last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+ while (!list_empty(&ar->txqs)) {
+ artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ txq = container_of((void *)artxq, struct ieee80211_txq,
+ drv_priv);
+
+ /* Cap the burst so an aggressive sta/tid cannot take over the tx queue */
+ max = 16;
+ ret = 0;
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+
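+ /* Rotate the queue to the tail unless it drained completely */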
+ list_del_init(&artxq->list);
+ if (ret != -ENOENT)
+ list_add_tail(&artxq->list, &ar->txqs);
+
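+ /* Refresh the tx queue state shared with the firmware */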
+ ath10k_htt_tx_txq_update(hw, txq);
+
+ if (artxq == last || (ret < 0 && ret != -ENOENT))
+ break;
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->txqs_lock);
+}
+