Merge git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
author     Kalle Valo <kvalo@codeaurora.org>
           Sun, 10 Jul 2016 18:02:20 +0000 (21:02 +0300)
committer  Kalle Valo <kvalo@codeaurora.org>
           Sun, 10 Jul 2016 18:07:29 +0000 (21:07 +0300)
This merge resolves conflicts in iwlwifi.

Conflicts:
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c

drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c

@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/module.h>
  #include <linux/firmware.h>
  #include <linux/of.h>
 +#include <asm/byteorder.h>
  
  #include "core.h"
  #include "mac.h"
@@@ -56,7 -55,7 +56,7 @@@ static const struct ath10k_hw_params at
                .name = "qca988x hw2.0",
                .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
 -              .has_shifted_cc_wraparound = true,
 +              .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
                        .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
                },
        },
 +      {
 +              .id = QCA9887_HW_1_0_VERSION,
 +              .dev_id = QCA9887_1_0_DEVICE_ID,
 +              .name = "qca9887 hw1.0",
 +              .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
 +              .uart_pin = 7,
 +              .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
 +              .otp_exe_param = 0,
 +              .channel_counters_freq_hz = 88000,
 +              .max_probe_resp_desc_thres = 0,
 +              .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
 +              .cal_data_len = 2116,
 +              .fw = {
 +                      .dir = QCA9887_HW_1_0_FW_DIR,
 +                      .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
 +                      .board_size = QCA9887_BOARD_DATA_SZ,
 +                      .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
 +              },
 +      },
        {
                .id = QCA6174_HW_2_1_VERSION,
                .dev_id = QCA6164_2_1_DEVICE_ID,
                .uart_pin = 7,
                .otp_exe_param = 0x00000700,
                .continuous_frag_desc = true,
 +              .cck_rate_map_rev2 = true,
                .channel_counters_freq_hz = 150000,
                .max_probe_resp_desc_thres = 24,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
                },
        },
 +      {
 +              .id = QCA9984_HW_1_0_DEV_VERSION,
 +              .dev_id = QCA9984_1_0_DEVICE_ID,
 +              .name = "qca9984/qca9994 hw1.0",
 +              .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
 +              .uart_pin = 7,
 +              .otp_exe_param = 0x00000700,
 +              .continuous_frag_desc = true,
 +              .cck_rate_map_rev2 = true,
 +              .channel_counters_freq_hz = 150000,
 +              .max_probe_resp_desc_thres = 24,
 +              .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
 +              .tx_chain_mask = 0xf,
 +              .rx_chain_mask = 0xf,
 +              .max_spatial_stream = 4,
 +              .cal_data_len = 12064,
 +              .fw = {
 +                      .dir = QCA9984_HW_1_0_FW_DIR,
 +                      .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
 +                      .board_size = QCA99X0_BOARD_DATA_SZ,
 +                      .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
 +              },
 +      },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
                .dev_id = QCA9377_1_0_DEVICE_ID,
                .name = "qca4019 hw1.0",
                .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
 -              .has_shifted_cc_wraparound = true,
 +              .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
                .otp_exe_param = 0x0010000,
                .continuous_frag_desc = true,
 +              .cck_rate_map_rev2 = true,
                .channel_counters_freq_hz = 125000,
                .max_probe_resp_desc_thres = 24,
                .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@@ -281,7 -236,6 +281,7 @@@ static const char *const ath10k_core_fw
        [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
        [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
        [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
 +      [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
  };
  
  static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@@ -577,35 -531,6 +577,35 @@@ out
        return ret;
  }
  
 +static int ath10k_download_cal_eeprom(struct ath10k *ar)
 +{
 +      size_t data_len;
 +      void *data = NULL;
 +      int ret;
 +
 +      ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
 +      if (ret) {
 +              if (ret != -EOPNOTSUPP)
 +                      ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
 +                                  ret);
 +              goto out_free;
 +      }
 +
 +      ret = ath10k_download_board_data(ar, data, data_len);
 +      if (ret) {
 +              ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
 +                          ret);
 +              goto out_free;
 +      }
 +
 +      ret = 0;
 +
 +out_free:
 +      kfree(data);
 +
 +      return ret;
 +}
 +
  static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
  {
        u32 result, address;
@@@ -1158,7 -1083,7 +1158,7 @@@ int ath10k_core_fetch_firmware_api_n(st
                        }
  
                        ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-                                       ar->running_fw->fw_file.fw_features,
+                                       fw_file->fw_features,
                                        sizeof(fw_file->fw_features));
                        break;
                case ATH10K_FW_IE_FW_IMAGE:
@@@ -1368,17 -1293,7 +1368,17 @@@ static int ath10k_download_cal_data(str
        }
  
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
 -                 "boot did not find DT entry, try OTP next: %d\n",
 +                 "boot did not find DT entry, try target EEPROM next: %d\n",
 +                 ret);
 +
 +      ret = ath10k_download_cal_eeprom(ar);
 +      if (ret == 0) {
 +              ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
 +              goto done;
 +      }
 +
 +      ath10k_dbg(ar, ATH10K_DBG_BOOT,
 +                 "boot did not find target EEPROM entry, try OTP next: %d\n",
                   ret);
  
        ret = ath10k_download_and_run_otp(ar);
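
The hunk above inserts a target-EEPROM step into ath10k's calibration download path, between the device-tree source and the OTP fallback (the calibration-file source is handled earlier and not shown here). As a rough illustration of the resulting fallback order, here is a minimal userspace sketch; the helper names are modeled on the driver but the stubs and return values are invented.

#include <stdio.h>

/* Simplified model of ath10k's calibration-source fallback: file, then DT,
 * then the new target-EEPROM step, then OTP. Stubs return 0 on success. */
enum cal_mode { CAL_FILE, CAL_DT, CAL_EEPROM, CAL_OTP, CAL_NONE };

static int download_cal_file(void)    { return -1; }
static int download_cal_dt(void)      { return -1; }
static int download_cal_eeprom(void)  { return  0; }
static int download_and_run_otp(void) { return  0; }

static enum cal_mode download_cal_data(void)
{
	if (download_cal_file() == 0)
		return CAL_FILE;
	if (download_cal_dt() == 0)
		return CAL_DT;
	/* new step added by this series: try the target EEPROM next */
	if (download_cal_eeprom() == 0)
		return CAL_EEPROM;
	if (download_and_run_otp() == 0)
		return CAL_OTP;
	return CAL_NONE;
}

int main(void)
{
	printf("cal mode: %d\n", download_cal_data());
	return 0;
}
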
@@@ -1818,16 -1733,6 +1818,16 @@@ int ath10k_core_start(struct ath10k *ar
                if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
                        val |= WMI_10_4_BSS_CHANNEL_INFO_64;
  
 +              /* 10.4 firmware supports BT-Coex without reloading firmware
 +               * via pdev param. To support Bluetooth coexistence pdev param,
 +               * WMI_COEX_GPIO_SUPPORT of extended resource config should be
 +               * enabled always.
 +               */
 +              if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
 +                  test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
 +                           ar->running_fw->fw_file.fw_features))
 +                      val |= WMI_10_4_COEX_GPIO_SUPPORT;
 +
                status = ath10k_mac_ext_resource_config(ar, val);
                if (status) {
                        ath10k_err(ar,
@@@ -2157,7 -2062,6 +2157,7 @@@ struct ath10k *ath10k_core_create(size_
  
        switch (hw_rev) {
        case ATH10K_HW_QCA988X:
 +      case ATH10K_HW_QCA9887:
                ar->regs = &qca988x_regs;
                ar->hw_values = &qca988x_values;
                break;
                ar->hw_values = &qca6174_values;
                break;
        case ATH10K_HW_QCA99X0:
 +      case ATH10K_HW_QCA9984:
                ar->regs = &qca99x0_regs;
                ar->hw_values = &qca99x0_values;
                break;
@@@ -2256,5 -2159,5 +2256,5 @@@ void ath10k_core_destroy(struct ath10k 
  EXPORT_SYMBOL(ath10k_core_destroy);
  
  MODULE_AUTHOR("Qualcomm Atheros");
 -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
 +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
  MODULE_LICENSE("Dual BSD/GPL");
@@@ -748,7 -748,7 +748,7 @@@ ath10k_htt_rx_h_peer_channel(struct ath
        if (WARN_ON_ONCE(!arvif))
                return NULL;
  
 -      if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
 +      if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def)))
                return NULL;
  
        return def.chan;
@@@ -939,8 -939,7 +939,8 @@@ static void ath10k_process_rx(struct at
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
 -                 status->flag == 0 ? "legacy" : "",
 +                 (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
 +                                                      "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
@@@ -1905,7 -1904,6 +1905,6 @@@ static void ath10k_htt_rx_in_ord_ind(st
                        return;
                }
        }
-       ath10k_htt_rx_msdu_buff_replenish(htt);
  }
  
  static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@@ -2183,6 -2181,34 +2182,6 @@@ static void ath10k_htt_rx_tx_mode_switc
        ath10k_mac_tx_push_pending(ar);
  }
  
 -static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
 -{
 -      enum nl80211_band band;
 -
 -      switch (phy_mode) {
 -      case MODE_11A:
 -      case MODE_11NA_HT20:
 -      case MODE_11NA_HT40:
 -      case MODE_11AC_VHT20:
 -      case MODE_11AC_VHT40:
 -      case MODE_11AC_VHT80:
 -              band = NL80211_BAND_5GHZ;
 -              break;
 -      case MODE_11G:
 -      case MODE_11B:
 -      case MODE_11GONLY:
 -      case MODE_11NG_HT20:
 -      case MODE_11NG_HT40:
 -      case MODE_11AC_VHT20_2G:
 -      case MODE_11AC_VHT40_2G:
 -      case MODE_11AC_VHT80_2G:
 -      default:
 -              band = NL80211_BAND_2GHZ;
 -      }
 -
 -      return band;
 -}
 -
  void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
  {
        bool release;
@@@ -2264,6 -2290,7 +2263,6 @@@ bool ath10k_htt_t2h_msg_handler(struct 
                        ath10k_htt_tx_mgmt_dec_pending(htt);
                        spin_unlock_bh(&htt->tx_lock);
                }
 -              ath10k_mac_tx_push_pending(ar);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
@@@ -2414,6 -2441,8 +2413,6 @@@ static void ath10k_htt_txrx_compl_task(
                dev_kfree_skb_any(skb);
        }
  
 -      ath10k_mac_tx_push_pending(ar);
 -
        num_mpdus = atomic_read(&htt->num_mpdus_ready);
  
        while (num_mpdus) {
@@@ -62,32 -62,6 +62,32 @@@ static struct ieee80211_rate ath10k_rat
        { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
  };
  
 +static struct ieee80211_rate ath10k_rates_rev2[] = {
 +      { .bitrate = 10,
 +        .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
 +      { .bitrate = 20,
 +        .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
 +        .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
 +        .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 +      { .bitrate = 55,
 +        .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
 +        .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
 +        .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 +      { .bitrate = 110,
 +        .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
 +        .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
 +        .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 +
 +      { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
 +      { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
 +      { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
 +      { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
 +      { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
 +      { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
 +      { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
 +      { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
 +};
 +
  #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
  
  #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@@ -96,9 -70,6 +96,9 @@@
  #define ath10k_g_rates (ath10k_rates + 0)
  #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
  
 +#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
 +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
 +
  static bool ath10k_mac_bitrate_is_cck(int bitrate)
  {
        switch (bitrate) {
@@@ -708,10 -679,10 +708,10 @@@ static int ath10k_peer_create(struct at
  
        peer = ath10k_peer_find(ar, vdev_id, addr);
        if (!peer) {
+               spin_unlock_bh(&ar->data_lock);
                ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
                            addr, vdev_id);
                ath10k_wmi_peer_delete(ar, vdev_id, addr);
-               spin_unlock_bh(&ar->data_lock);
                return -ENOENT;
        }
  
@@@ -3810,9 -3781,6 +3810,9 @@@ void ath10k_mac_tx_push_pending(struct 
        int ret;
        int max;
  
 +      if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
 +              return;
 +
        spin_lock_bh(&ar->txqs_lock);
        rcu_read_lock();
  
@@@ -4083,7 -4051,9 +4083,7 @@@ static void ath10k_mac_op_wake_tx_queue
                list_add_tail(&artxq->list, &ar->txqs);
        spin_unlock_bh(&ar->txqs_lock);
  
 -      if (ath10k_mac_tx_can_push(hw, txq))
 -              tasklet_schedule(&ar->htt.txrx_compl_task);
 -
 +      ath10k_mac_tx_push_pending(ar);
        ath10k_htt_tx_txq_update(hw, txq);
  }
  
@@@ -4497,19 -4467,6 +4497,19 @@@ static int ath10k_start(struct ieee8021
                }
        }
  
 +      param = ar->wmi.pdev_param->enable_btcoex;
 +      if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
 +          test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
 +                   ar->running_fw->fw_file.fw_features)) {
 +              ret = ath10k_wmi_pdev_set_param(ar, param, 0);
 +              if (ret) {
 +                      ath10k_warn(ar,
 +                                  "failed to set btcoex param: %d\n", ret);
 +                      goto err_core_stop;
 +              }
 +              clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
 +      }
 +
        ar->num_started_vdevs = 0;
        ath10k_regd_update(ar);
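
The ath10k_start() change above only touches the enable_btcoex pdev param (and clears ATH10K_FLAG_BTCOEX) when the firmware advertises both the WMI coex-GPIO service and the "btcoex-param" feature flag; the same double gate appears in ath10k_core_start() for WMI_10_4_COEX_GPIO_SUPPORT. A minimal sketch of that gating, with plain booleans standing in for the kernel's service map and fw_features bitmaps:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the BT-coex gating added in this series: the pdev
 * param path is used only when both firmware indications are present. */
static bool btcoex_param_supported(bool wmi_service_coex_gpio,
				   bool fw_feature_btcoex_param)
{
	return wmi_service_coex_gpio && fw_feature_btcoex_param;
}

int main(void)
{
	printf("old fw (no feature flag):  %d\n",
	       btcoex_param_supported(true, false));
	printf("10.4 fw with btcoex-param: %d\n",
	       btcoex_param_supported(true, true));
	return 0;
}
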
  
@@@ -7738,14 -7695,8 +7738,14 @@@ int ath10k_mac_register(struct ath10k *
                band = &ar->mac.sbands[NL80211_BAND_2GHZ];
                band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
                band->channels = channels;
 -              band->n_bitrates = ath10k_g_rates_size;
 -              band->bitrates = ath10k_g_rates;
 +
 +              if (ar->hw_params.cck_rate_map_rev2) {
 +                      band->n_bitrates = ath10k_g_rates_rev2_size;
 +                      band->bitrates = ath10k_g_rates_rev2;
 +              } else {
 +                      band->n_bitrates = ath10k_g_rates_size;
 +                      band->bitrates = ath10k_g_rates;
 +              }
  
                ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
        }
@@@ -465,20 -465,11 +465,20 @@@ int iwl_mvm_mac_setup_register(struct i
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
  
 -      BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
 +      BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
        memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
        hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
        hw->wiphy->cipher_suites = mvm->ciphers;
  
 +      if (iwl_mvm_has_new_rx_api(mvm)) {
 +              mvm->ciphers[hw->wiphy->n_cipher_suites] =
 +                      WLAN_CIPHER_SUITE_GCMP;
 +              hw->wiphy->n_cipher_suites++;
 +              mvm->ciphers[hw->wiphy->n_cipher_suites] =
 +                      WLAN_CIPHER_SUITE_GCMP_256;
 +              hw->wiphy->n_cipher_suites++;
 +      }
 +
        /*
         * Enable 11w if advertised by firmware and software crypto
         * is not enabled (as the firmware will interpret some mgmt
  
        /* currently FW API supports only one optional cipher scheme */
        if (mvm->fw->cs[0].cipher) {
 +              const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
 +              struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
 +
                mvm->hw->n_cipher_schemes = 1;
 -              mvm->hw->cipher_schemes = &mvm->fw->cs[0];
 -              mvm->ciphers[hw->wiphy->n_cipher_suites] =
 -                      mvm->fw->cs[0].cipher;
 +
 +              cs->cipher = le32_to_cpu(fwcs->cipher);
 +              cs->iftype = BIT(NL80211_IFTYPE_STATION);
 +              cs->hdr_len = fwcs->hdr_len;
 +              cs->pn_len = fwcs->pn_len;
 +              cs->pn_off = fwcs->pn_off;
 +              cs->key_idx_off = fwcs->key_idx_off;
 +              cs->key_idx_mask = fwcs->key_idx_mask;
 +              cs->key_idx_shift = fwcs->key_idx_shift;
 +              cs->mic_len = fwcs->mic_len;
 +
 +              mvm->hw->cipher_schemes = mvm->cs;
 +              mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
                hw->wiphy->n_cipher_suites++;
        }
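
The hunk above grows the advertised cipher list: GCMP and GCMP-256 are appended only when the firmware uses the new RX API, and the optional firmware cipher scheme is now copied field by field into an ieee80211_cipher_scheme. The sketch below models only the suite-list construction; the base list is a single CCMP stand-in for mvm_ciphers[], and the selector values are the standard 802.11 00-0F-AC suite selectors.

#include <stdio.h>
#include <stdint.h>

/* Standard 802.11 cipher suite selectors (00-0F-AC:x). */
#define WLAN_CIPHER_SUITE_CCMP     0x000fac04
#define WLAN_CIPHER_SUITE_GCMP     0x000fac08
#define WLAN_CIPHER_SUITE_GCMP_256 0x000fac09

/* Simplified model: start from the base suites, append the GCMP suites only
 * when the new RX API (which can handle them) is in use. */
static int build_cipher_list(uint32_t *suites, int max, int has_new_rx_api)
{
	int n = 0;

	suites[n++] = WLAN_CIPHER_SUITE_CCMP;   /* stand-in for mvm_ciphers[] */
	if (has_new_rx_api && n + 2 <= max) {
		suites[n++] = WLAN_CIPHER_SUITE_GCMP;
		suites[n++] = WLAN_CIPHER_SUITE_GCMP_256;
	}
	return n;
}

int main(void)
{
	uint32_t suites[6];
	int n = build_cipher_list(suites, 6, 1);

	for (int i = 0; i < n; i++)
		printf("suite[%d] = 0x%08x\n", i, suites[i]);
	return 0;
}
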
  
@@@ -1033,7 -1011,11 +1033,7 @@@ static void iwl_mvm_restart_cleanup(str
        memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
        memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 -      memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 -      memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
 -      memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
 -      memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
  
        ieee80211_wake_queues(mvm->hw);
  
@@@ -1217,8 -1199,6 +1217,8 @@@ static void iwl_mvm_mac_stop(struct iee
        flush_work(&mvm->async_handlers_wk);
        flush_work(&mvm->add_stream_wk);
        cancel_delayed_work_sync(&mvm->fw_dump_wk);
 +      cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
 +      cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
        iwl_mvm_free_fw_dump_desc(mvm);
  
        mutex_lock(&mvm->mutex);
@@@ -1250,20 -1230,18 +1250,20 @@@ static int iwl_mvm_set_tx_power(struct 
                                s16 tx_power)
  {
        struct iwl_dev_tx_power_cmd cmd = {
 -              .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
 -              .v2.mac_context_id =
 +              .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
 +              .v3.v2.mac_context_id =
                        cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
 -              .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
 +              .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
        };
        int len = sizeof(cmd);
  
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
 -              cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 +              cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
  
 +      if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
 +              len = sizeof(cmd.v3);
        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
 -              len = sizeof(cmd.v2);
 +              len = sizeof(cmd.v3.v2);
  
        return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
  }
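
iwl_mvm_set_tx_power() now fills in the newest command layout (v3 wrapping v2) and then trims the length it actually sends according to what the firmware advertises, so older firmware only ever sees the prefix it understands. A hedged standalone model of that nested-versioning pattern follows; the struct members are invented, only the length-selection logic mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

/* Invented layout mirroring the nested-versioning idea: each newer command
 * version embeds the previous one as its first member, so a prefix of the
 * full structure is always a valid older-version command. */
struct tx_power_cmd_v2 { uint32_t set_mode, mac_context_id; uint16_t pwr_restriction; };
struct tx_power_cmd_v3 { struct tx_power_cmd_v2 v2; uint16_t per_chain[2]; };
struct tx_power_cmd    { struct tx_power_cmd_v3 v3; uint32_t ack_flags; };

static size_t tx_power_cmd_len(int has_tx_power_ack, int has_tx_power_chain)
{
	size_t len = sizeof(struct tx_power_cmd);

	if (!has_tx_power_ack)
		len = sizeof(struct tx_power_cmd_v3);  /* drop the ACK tail */
	if (!has_tx_power_chain)
		len = sizeof(struct tx_power_cmd_v2);  /* drop per-chain data too */
	return len;
}

int main(void)
{
	printf("old fw:    %zu bytes\n", tx_power_cmd_len(0, 0));
	printf("newest fw: %zu bytes\n", tx_power_cmd_len(1, 1));
	return 0;
}
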
@@@ -2382,7 -2360,7 +2382,7 @@@ static void iwl_mvm_check_uapsd(struct 
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
                return;
  
 -      if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
 +      if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
                vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
                return;
        }
@@@ -2741,8 -2719,6 +2741,8 @@@ static int iwl_mvm_mac_set_key(struct i
                key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
 +      case WLAN_CIPHER_SUITE_GCMP:
 +      case WLAN_CIPHER_SUITE_GCMP_256:
                key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                    sta && iwl_mvm_has_new_rx_api(mvm) &&
                    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
                    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
 -                   key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
 +                   key->cipher == WLAN_CIPHER_SUITE_GCMP ||
 +                   key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
                        struct ieee80211_key_seq seq;
                        int tid, q;
  
                if (sta && iwl_mvm_has_new_rx_api(mvm) &&
                    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
                    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
 -                   key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
 +                   key->cipher == WLAN_CIPHER_SUITE_GCMP ||
 +                   key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
                        mvmsta = iwl_mvm_sta_from_mac80211(sta);
                        ptk_pn = rcu_dereference_protected(
                                                mvmsta->ptk_pn[keyidx],
@@@ -3713,13 -3687,6 +3713,13 @@@ static int iwl_mvm_pre_channel_switch(s
                        goto out_unlock;
                }
  
 +              /* we still didn't unblock tx. prevent new CS meanwhile */
 +              if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
 +                                            lockdep_is_held(&mvm->mutex))) {
 +                      ret = -EBUSY;
 +                      goto out_unlock;
 +              }
 +
                rcu_assign_pointer(mvm->csa_vif, vif);
  
                if (WARN_ONCE(mvmvif->csa_countdown,
                        goto out_unlock;
                }
  
 +              mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
 +
                break;
        case NL80211_IFTYPE_STATION:
                if (mvmvif->lqm_active)
@@@ -3886,8 -3851,8 +3886,8 @@@ static int iwl_mvm_mac_get_survey(struc
        if (idx != 0)
                return -ENOENT;
  
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;
  
        mutex_lock(&mvm->mutex);
@@@ -3933,13 -3898,8 +3933,13 @@@ static void iwl_mvm_mac_sta_statistics(
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
  
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
 +      if (mvmsta->avg_energy) {
 +              sinfo->signal_avg = mvmsta->avg_energy;
 +              sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
 +      }
 +
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;
  
        /* if beacon filtering isn't on mac80211 does it anyway */
  #include "fw-api.h"
  #include "fw-dbg.h"
  
 -void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 -{
 -      mvm->ampdu_ref++;
 -
 -#ifdef CONFIG_IWLWIFI_DEBUGFS
 -      if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
 -              spin_lock(&mvm->drv_stats_lock);
 -              mvm->drv_rx_stats.ampdu_count++;
 -              spin_unlock(&mvm->drv_stats_lock);
 -      }
 -#endif
 -}
 -
  static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
  {
@@@ -476,9 -489,6 +476,9 @@@ void iwl_mvm_reorder_timer_expired(unsi
                rcu_read_lock();
                sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
                /* SN is set to the last expired frame + 1 */
 +              IWL_DEBUG_HT(buf->mvm,
 +                           "Releasing expired frames for sta %u, sn %d\n",
 +                           buf->sta_id, sn);
                iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
                rcu_read_unlock();
        } else if (buf->num_stored) {
@@@ -571,14 -581,12 +571,14 @@@ static bool iwl_mvm_reorder(struct iwl_
                            struct iwl_rx_mpdu_desc *desc)
  {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_baid_data *baid_data;
        struct iwl_mvm_reorder_buffer *buffer;
        struct sk_buff *tail;
        u32 reorder = le32_to_cpu(desc->reorder_data);
        bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
 +      bool last_subframe =
 +              desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
        u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        u8 sub_frame_idx = desc->amsdu_info &
                           IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;
  
+       mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        /* not a data packet */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
        /* release immediately if allowed by nssn and no stored frames */
        if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
                if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
 -                                     buffer->buf_size))
 +                                     buffer->buf_size) &&
 +                 (!amsdu || last_subframe))
                        buffer->head_sn = nssn;
                /* No need to update AMSDU last SN - we are moving the head */
                spin_unlock_bh(&buffer->lock);
                buffer->last_sub_index = sub_frame_idx;
        }
  
 -      iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
 +      /*
 +       * We cannot trust NSSN for AMSDU sub-frames that are not the last.
 +       * The reason is that NSSN advances on the first sub-frame, and may
 +       * cause the reorder buffer to advance before all the sub-frames arrive.
 +       * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
 +       * SN 1. NSSN for first sub frame will be 3 with the result of driver
 +       * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
 +       * already ahead and it will be dropped.
 +       * If the last sub-frame is not on this queue - we will get frame
 +       * release notification with up to date NSSN.
 +       */
 +      if (!amsdu || last_subframe)
 +              iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
 +
        spin_unlock_bh(&buffer->lock);
        return true;
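
The comment in the hunk above explains why the reorder buffer must not advance on NSSN for A-MSDU subframes other than the last one: NSSN already moves on the first subframe, so trusting it early could release sequence numbers whose remaining subframes are still in flight. A minimal standalone sketch of that gating condition (not the iwlwifi code, just the release rule) is:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the iwl_mvm_reorder() release decision: frames are
 * released up to NSSN only when the current frame is not part of an A-MSDU
 * or is the last subframe of one. */
static bool may_release(bool amsdu, bool last_subframe)
{
	return !amsdu || last_subframe;
}

int main(void)
{
	/* first subframe of an A-MSDU: hold back, wait for the last one */
	printf("amsdu, not last: release=%d\n", may_release(true, false));
	/* last subframe: NSSN is now safe to act on */
	printf("amsdu, last:     release=%d\n", may_release(true, true));
	/* plain MPDU: always safe */
	printf("plain MPDU:      release=%d\n", may_release(false, false));
	return 0;
}
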
  
@@@ -740,7 -736,6 +742,7 @@@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm 
        struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
 +      u16 phy_info = le16_to_cpu(desc->phy_info);
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0;
                             le16_to_cpu(desc->status));
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
        }
 -
 -      rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
 +      /* set the preamble flag if appropriate */
 +      if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
 +              rx_status->flag |= RX_FLAG_SHORTPRE;
 +
 +      if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
 +              rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
 +              /* TSF as indicated by the firmware is at INA time */
 +              rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
 +      }
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
        rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
                                               NL80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);
 -      /* TSF as indicated by the firmware is at INA time */
 -      rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
 +
 +      /* update aggregation data for monitor sake on default queue */
 +      if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
 +              bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
 +
 +              rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
 +              rx_status->ampdu_reference = mvm->ampdu_ref;
 +              /* toggle is switched whenever new aggregation starts */
 +              if (toggle_bit != mvm->ampdu_toggle) {
 +                      mvm->ampdu_ref++;
 +                      mvm->ampdu_toggle = toggle_bit;
 +              }
 +      }
  
        rcu_read_lock();
  
  
        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 +              struct ieee80211_vif *tx_blocked_vif =
 +                      rcu_dereference(mvm->csa_tx_blocked_vif);
                u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
                               IWL_RX_MPDU_REORDER_BAID_MASK) >>
                               IWL_RX_MPDU_REORDER_BAID_SHIFT);
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
 -              if (unlikely(mvm->csa_tx_block_bcn_timeout))
 -                      iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
 +              if (unlikely(tx_blocked_vif) &&
 +                  tx_blocked_vif == mvmsta->vif) {
 +                      struct iwl_mvm_vif *mvmvif =
 +                              iwl_mvm_vif_from_mac80211(tx_blocked_vif);
 +
 +                      if (mvmvif->csa_target_freq == rx_status->freq)
 +                              iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
 +                                                               false);
 +              }
  
                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
  
                                iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
                }
  
 -              /* TODO: multi queue TCM */
 -
                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, desc);
  
                        iwl_mvm_agg_rx_received(mvm, baid);
        }
  
 -      /*
 -       * TODO: PHY info.
 -       * Verify we don't have the information in the MPDU descriptor and
 -       * that it is not needed.
 -       * Make sure for monitor mode that we are on default queue, update
 -       * ampdu_ref and the rest of phy info then
 -       */
 -
        /* Set up the HT phy flags */
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
        case RATE_MCS_CHAN_WIDTH_20:
                                                            rx_status->band);
        }
  
 -      /* TODO: PHY info - update ampdu queue statistics (for debugfs) */
 -      /* TODO: PHY info - gscan */
 +      /* management stuff on default queue */
 +      if (!queue) {
 +              if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
 +                            ieee80211_is_probe_resp(hdr->frame_control)) &&
 +                           mvm->sched_scan_pass_all ==
 +                           SCHED_SCAN_PASS_ALL_ENABLED))
 +                      mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
 +
 +              if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
 +                           ieee80211_is_probe_resp(hdr->frame_control)))
 +                      rx_status->boottime_ns = ktime_get_boot_ns();
 +      }
  
        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
        if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
@@@ -959,9 -927,6 +961,9 @@@ void iwl_mvm_rx_frame_release(struct iw
  
        int baid = release->baid;
  
 +      IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
 +                   release->baid, le16_to_cpu(release->nssn));
 +
        if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
                return;
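
With the multi-queue RX path above now deriving PHY info from the MPDU descriptor, the default queue tracks A-MPDU boundaries with a toggle bit: ampdu_ref is bumped each time the bit flips, so all MPDUs of one aggregation share the same reference for monitor-mode reporting. A small standalone model of that bookkeeping (the struct is a stand-in for the mvm state, not the real one):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the toggle-bit tracking done on the default RX queue: the
 * firmware flips one bit whenever a new aggregation starts, and the driver
 * increments its reference counter on every flip it observes. */
struct ampdu_state {
	unsigned int ref;   /* mvm->ampdu_ref in the driver */
	bool toggle;        /* mvm->ampdu_toggle in the driver */
};

static unsigned int ampdu_reference(struct ampdu_state *st, bool toggle_bit)
{
	if (toggle_bit != st->toggle) {
		st->ref++;              /* new aggregation started */
		st->toggle = toggle_bit;
	}
	return st->ref;                 /* frames of one A-MPDU share this */
}

int main(void)
{
	struct ampdu_state st = { .ref = 0, .toggle = false };
	bool bits[] = { true, true, true, false, false, true };

	for (unsigned i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		printf("frame %u -> ampdu_ref %u\n", i,
		       ampdu_reference(&st, bits[i]));
	return 0;
}
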
  
@@@ -310,304 -310,6 +310,304 @@@ static void iwl_mvm_tdls_sta_deinit(str
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
  }
  
 +/* Disable aggregations for a bitmap of TIDs for a given station */
 +static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
 +                                      unsigned long disable_agg_tids,
 +                                      bool remove_queue)
 +{
 +      struct iwl_mvm_add_sta_cmd cmd = {};
 +      struct ieee80211_sta *sta;
 +      struct iwl_mvm_sta *mvmsta;
 +      u32 status;
 +      u8 sta_id;
 +      int ret;
 +
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      sta_id = mvm->queue_info[queue].ra_sta_id;
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      rcu_read_lock();
 +
 +      sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 +
 +      if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 +              rcu_read_unlock();
 +              return -EINVAL;
 +      }
 +
 +      mvmsta = iwl_mvm_sta_from_mac80211(sta);
 +
 +      mvmsta->tid_disable_agg |= disable_agg_tids;
 +
 +      cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
 +      cmd.sta_id = mvmsta->sta_id;
 +      cmd.add_modify = STA_MODE_MODIFY;
 +      cmd.modify_mask = STA_MODIFY_QUEUES;
 +      if (disable_agg_tids)
 +              cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
 +      if (remove_queue)
 +              cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
 +      cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
 +      cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
 +
 +      rcu_read_unlock();
 +
 +      /* Notify FW of queue removal from the STA queues */
 +      status = ADD_STA_SUCCESS;
 +      ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 +                                        iwl_mvm_add_sta_cmd_size(mvm),
 +                                        &cmd, &status);
 +
 +      return ret;
 +}
 +
 +static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 +{
 +      struct ieee80211_sta *sta;
 +      struct iwl_mvm_sta *mvmsta;
 +      unsigned long tid_bitmap;
 +      unsigned long agg_tids = 0;
 +      s8 sta_id;
 +      int tid;
 +
 +      lockdep_assert_held(&mvm->mutex);
 +
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      sta_id = mvm->queue_info[queue].ra_sta_id;
 +      tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 +                                      lockdep_is_held(&mvm->mutex));
 +
 +      if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
 +              return -EINVAL;
 +
 +      mvmsta = iwl_mvm_sta_from_mac80211(sta);
 +
 +      spin_lock_bh(&mvmsta->lock);
 +      for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 +              if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 +                      agg_tids |= BIT(tid);
 +      }
 +      spin_unlock_bh(&mvmsta->lock);
 +
 +      return agg_tids;
 +}
 +
 +/*
 + * Remove a queue from a station's resources.
 + * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 + * doesn't disable the queue
 + */
 +static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 +{
 +      struct ieee80211_sta *sta;
 +      struct iwl_mvm_sta *mvmsta;
 +      unsigned long tid_bitmap;
 +      unsigned long disable_agg_tids = 0;
 +      u8 sta_id;
 +      int tid;
 +
 +      lockdep_assert_held(&mvm->mutex);
 +
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      sta_id = mvm->queue_info[queue].ra_sta_id;
 +      tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      rcu_read_lock();
 +
 +      sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 +
 +      if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 +              rcu_read_unlock();
 +              return 0;
 +      }
 +
 +      mvmsta = iwl_mvm_sta_from_mac80211(sta);
 +
 +      spin_lock_bh(&mvmsta->lock);
 +      /* Unmap MAC queues and TIDs from this queue */
 +      for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 +              if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 +                      disable_agg_tids |= BIT(tid);
 +              mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
 +      }
 +
 +      mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
 +      spin_unlock_bh(&mvmsta->lock);
 +
 +      rcu_read_unlock();
 +
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      /* Unmap MAC queues and TIDs from this queue */
 +      mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
 +      mvm->queue_info[queue].hw_queue_refcount = 0;
 +      mvm->queue_info[queue].tid_bitmap = 0;
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      return disable_agg_tids;
 +}
 +
 +static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 +                                  unsigned long tfd_queue_mask, u8 ac)
 +{
 +      int queue = 0;
 +      u8 ac_to_queue[IEEE80211_NUM_ACS];
 +      int i;
 +
 +      lockdep_assert_held(&mvm->queue_info_lock);
 +
 +      memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
 +
 +      /* See what ACs the existing queues for this STA have */
 +      for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
 +              /* Only DATA queues can be shared */
 +              if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
 +                  i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
 +                      continue;
 +
 +              ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
 +      }
 +
 +      /*
 +       * The queue to share is chosen only from DATA queues as follows (in
 +       * descending priority):
 +       * 1. An AC_BE queue
 +       * 2. Same AC queue
 +       * 3. Highest AC queue that is lower than new AC
 +       * 4. Any existing AC (there always is at least 1 DATA queue)
 +       */
 +
 +      /* Priority 1: An AC_BE queue */
 +      if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[IEEE80211_AC_BE];
 +      /* Priority 2: Same AC queue */
 +      else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[ac];
 +      /* Priority 3a: If new AC is VO and VI exists - use VI */
 +      else if (ac == IEEE80211_AC_VO &&
 +               ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[IEEE80211_AC_VI];
 +      /* Priority 3b: No BE so only AC less than the new one is BK */
 +      else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[IEEE80211_AC_BK];
 +      /* Priority 4a: No BE nor BK - use VI if exists */
 +      else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[IEEE80211_AC_VI];
 +      /* Priority 4b: No BE, BK nor VI - use VO if exists */
 +      else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
 +              queue = ac_to_queue[IEEE80211_AC_VO];
 +
 +      /* Make sure queue found (or not) is legal */
 +      if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
 +             queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
 +            (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
 +             queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
 +            (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
 +              IWL_ERR(mvm, "No DATA queues available to share\n");
 +              queue = -ENOSPC;
 +      }
 +
 +      return queue;
 +}
 +
 +/*
 + * If a given queue has a higher AC than the TID stream that is being added to
 + * it, the queue needs to be redirected to the lower AC. This function does that
 + * in such a case, otherwise - if no redirection required - it does nothing,
 + * unless the %force param is true.
 + */
 +static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 +                                    int ac, int ssn, unsigned int wdg_timeout,
 +                                    bool force)
 +{
 +      struct iwl_scd_txq_cfg_cmd cmd = {
 +              .scd_queue = queue,
 +              .enable = 0,
 +      };
 +      bool shared_queue;
 +      unsigned long mq;
 +      int ret;
 +
 +      /*
 +       * If the AC is lower than current one - FIFO needs to be redirected to
 +       * the lowest one of the streams in the queue. Check if this is needed
 +       * here.
 +       * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
 +       * value 3 and VO with value 0, so to check if ac X is lower than ac Y
 +       * we need to check if the numerical value of X is LARGER than of Y.
 +       */
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
 +              spin_unlock_bh(&mvm->queue_info_lock);
 +
 +              IWL_DEBUG_TX_QUEUES(mvm,
 +                                  "No redirection needed on TXQ #%d\n",
 +                                  queue);
 +              return 0;
 +      }
 +
 +      cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 +      cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
 +      mq = mvm->queue_info[queue].hw_queue_to_mac80211;
 +      shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
 +                          queue, iwl_mvm_ac_to_tx_fifo[ac]);
 +
 +      /* Stop MAC queues and wait for this queue to empty */
 +      iwl_mvm_stop_mac_queues(mvm, mq);
 +      ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
 +      if (ret) {
 +              IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
 +                      queue);
 +              ret = -EIO;
 +              goto out;
 +      }
 +
 +      /* Before redirecting the queue we need to de-activate it */
 +      iwl_trans_txq_disable(mvm->trans, queue, false);
 +      ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
 +      if (ret)
 +              IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
 +                      ret);
 +
 +      /* Make sure the SCD wrptr is correctly set before reconfiguring */
 +      iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
 +                           cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
 +                           ssn, wdg_timeout);
 +
 +      /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
 +
 +      /* Redirect to lower AC */
 +      iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
 +                           cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
 +                           ssn);
 +
 +      /* Update AC marking of the queue */
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      mvm->queue_info[queue].mac80211_ac = ac;
 +      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +      /*
 +       * Mark queue as shared in transport if shared
 +       * Note this has to be done after queue enablement because enablement
 +       * can also set this value, and there is no indication there to shared
 +       * queues
 +       */
 +      if (shared_queue)
 +              iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 +
 +out:
 +      /* Continue using the MAC queues */
 +      iwl_mvm_start_mac_queues(mvm, mq);
 +
 +      return ret;
 +}
 +
  static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
 +      bool using_inactive_queue = false;
 +      unsigned long disable_agg_tids = 0;
 +      enum iwl_mvm_agg_state queue_state;
 +      bool shared_queue = false;
        int ssn;
 +      unsigned long tfd_queue_mask;
        int ret;
  
        lockdep_assert_held(&mvm->mutex);
  
 +      spin_lock_bh(&mvmsta->lock);
 +      tfd_queue_mask = mvmsta->tfd_queue_msk;
 +      spin_unlock_bh(&mvmsta->lock);
 +
        spin_lock_bh(&mvm->queue_info_lock);
  
        /*
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 -              queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
 +              queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 +                                              IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                /* If no such queue is found, we'll use a DATA queue instead */
        }
  
 -      if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
 +      if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
 +          (mvm->queue_info[mvmsta->reserved_queue].status ==
 +           IWL_MVM_QUEUE_RESERVED ||
 +           mvm->queue_info[mvmsta->reserved_queue].status ==
 +           IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
 +              mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }
  
        if (queue < 0)
 -              queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
 +              queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 +                                              IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
  
 +      /*
 +       * Check if this queue is already allocated but inactive.
 +       * In such a case, we'll need to first free this queue before enabling
 +       * it again, so we'll mark it as reserved to make sure no new traffic
 +       * arrives on it
 +       */
 +      if (queue > 0 &&
 +          mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
 +              mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 +              using_inactive_queue = true;
 +              IWL_DEBUG_TX_QUEUES(mvm,
 +                                  "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
 +                                  queue, mvmsta->sta_id, tid);
 +      }
 +
 +      /* No free queue - we'll have to share */
 +      if (queue <= 0) {
 +              queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
 +              if (queue > 0) {
 +                      shared_queue = true;
 +                      mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
 +              }
 +      }
 +
        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
 -      if (queue >= 0)
 +      if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
  
        spin_unlock_bh(&mvm->queue_info_lock);
  
 -      /* TODO: support shared queues for same RA */
 -      if (queue < 0)
 +      /* This shouldn't happen - out of queues */
 +      if (WARN_ON(queue <= 0)) {
 +              IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
 +                      tid, cfg.sta_id);
                return -ENOSPC;
 +      }
  
        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
  
 -      IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
 -                          queue, mvmsta->sta_id, tid);
 +      /*
 +       * If this queue was previously inactive (idle) - we need to free it
 +       * first
 +       */
 +      if (using_inactive_queue) {
 +              struct iwl_scd_txq_cfg_cmd cmd = {
 +                      .scd_queue = queue,
 +                      .enable = 0,
 +              };
 +              u8 ac;
 +
 +              disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
 +
 +              spin_lock_bh(&mvm->queue_info_lock);
 +              ac = mvm->queue_info[queue].mac80211_ac;
 +              cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 +              cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
 +              spin_unlock_bh(&mvm->queue_info_lock);
 +
 +              /* Disable the queue */
 +              iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
 +                                           true);
 +              iwl_trans_txq_disable(mvm->trans, queue, false);
 +              ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
 +                                         &cmd);
 +              if (ret) {
 +                      IWL_ERR(mvm,
 +                              "Failed to free inactive queue %d (ret=%d)\n",
 +                              queue, ret);
 +
 +                      /* Re-mark the inactive queue as inactive */
 +                      spin_lock_bh(&mvm->queue_info_lock);
 +                      mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
 +                      spin_unlock_bh(&mvm->queue_info_lock);
 +
 +                      return ret;
 +              }
 +      }
 +
 +      IWL_DEBUG_TX_QUEUES(mvm,
 +                          "Allocating %squeue #%d to sta %d on tid %d\n",
 +                          shared_queue ? "shared " : "", queue,
 +                          mvmsta->sta_id, tid);
 +
 +      if (shared_queue) {
 +              /* Disable any open aggs on this queue */
 +              disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
 +
 +              if (disable_agg_tids) {
 +                      IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
 +                                          queue);
 +                      iwl_mvm_invalidate_sta_queue(mvm, queue,
 +                                                   disable_agg_tids, false);
 +              }
 +      }
  
        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
                           wdg_timeout);
  
 +      /*
 +       * Mark queue as shared in transport if shared
 +       * Note this has to be done after queue enablement because enablement
 +       * can also set this value, and there is no indication there to shared
 +       * queues
 +       */
 +      if (shared_queue)
 +              iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 +
        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
 +      mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
 +      queue_state = mvmsta->tid_data[tid].state;
  
        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);
  
 -      ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
 -      if (ret)
 -              goto out_err;
 +      if (!shared_queue) {
 +              ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
 +              if (ret)
 +                      goto out_err;
 +
 +              /* If we need to re-enable aggregations... */
 +              if (queue_state == IWL_AGG_ON) {
 +                      ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 +                      if (ret)
 +                              goto out_err;
 +              }
 +      } else {
 +              /* Redirect queue, if needed */
 +              ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
 +                                               wdg_timeout, false);
 +              if (ret)
 +                      goto out_err;
 +      }
  
        return 0;
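
The DQA changes in the hunk above let a station share an existing TXQ when no free queue is left, and the in-code comment spells out the victim-selection order: an AC_BE queue first, then a queue of the same AC, then the highest AC below the new one, then any existing DATA queue. A hedged standalone sketch of that priority order (a simplified model of iwl_mvm_get_shared_queue(), ignoring the MGMT/BSS-client queue range checks):

#include <stdio.h>

#define INVAL_QUEUE 0xff

/* Access categories in mac80211's (flipped) numbering: VO=0 ... BK=3,
 * so a numerically larger AC is a lower-priority one. */
enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

/* ac_to_queue[] maps each AC to an already-allocated queue, or INVAL_QUEUE. */
static int pick_shared_queue(const unsigned char ac_to_queue[NUM_ACS], int ac)
{
	if (ac_to_queue[AC_BE] != INVAL_QUEUE)          /* 1: an AC_BE queue */
		return ac_to_queue[AC_BE];
	if (ac_to_queue[ac] != INVAL_QUEUE)             /* 2: same AC */
		return ac_to_queue[ac];
	if (ac == AC_VO && ac_to_queue[AC_VI] != INVAL_QUEUE)
		return ac_to_queue[AC_VI];              /* 3a: VO falls back to VI */
	if (ac_to_queue[AC_BK] != INVAL_QUEUE)          /* 3b: only lower AC is BK */
		return ac_to_queue[AC_BK];
	if (ac_to_queue[AC_VI] != INVAL_QUEUE)          /* 4a: anything that exists */
		return ac_to_queue[AC_VI];
	if (ac_to_queue[AC_VO] != INVAL_QUEUE)          /* 4b */
		return ac_to_queue[AC_VO];
	return -1; /* no DATA queue to share */
}

int main(void)
{
	/* station already owns a VI queue (#9) and a BK queue (#10) */
	unsigned char map[NUM_ACS] = { INVAL_QUEUE, 9, INVAL_QUEUE, 10 };

	printf("new VO stream shares queue %d\n", pick_shared_queue(map, AC_VO));
	printf("new BE stream shares queue %d\n", pick_shared_queue(map, AC_BE));
	return 0;
}
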
  
@@@ -896,9 -476,6 +896,9 @@@ void iwl_mvm_add_new_dqa_stream_wk(stru
        unsigned long deferred_tid_traffic;
        int sta_id, tid;
  
 +      /* Check inactivity of queues */
 +      iwl_mvm_inactivity_check(mvm);
 +
        mutex_lock(&mvm->mutex);
  
        /* Go over all stations with deferred traffic */
@@@ -928,12 -505,6 +928,12 @@@ static int iwl_mvm_reserve_sta_stream(s
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;
  
 +      /*
 +       * Check for inactive queues, so we don't reach a situation where we
 +       * can't add a STA due to a shortage in queues that doesn't really exist
 +       */
 +      iwl_mvm_inactivity_check(mvm);
 +
        spin_lock_bh(&mvm->queue_info_lock);
  
        /* Make sure we have free resources for this STA */
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
 -              queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
 +              queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 +                                              IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
@@@ -998,11 -568,8 +998,11 @@@ int iwl_mvm_add_sta(struct iwl_mvm *mvm
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;
  
 -      /* allocate new queues for a TDLS station */
 -      if (sta->tdls) {
 +      /*
 +       * Allocate new queues for a TDLS station, unless we're in DQA mode,
 +       * and then they'll be allocated dynamically
 +       */
 +      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
                ret = iwl_mvm_tdls_sta_init(mvm, sta);
                if (ret)
                        return ret;
        return 0;
  
  err:
 -      iwl_mvm_tdls_sta_deinit(mvm, sta);
 +      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 +              iwl_mvm_tdls_sta_deinit(mvm, sta);
        return ret;
  }
  
@@@ -1253,9 -819,8 +1253,9 @@@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm
        if (iwl_mvm_has_new_rx_api(mvm))
                kfree(mvm_sta->dup_data);
  
 -      if (vif->type == NL80211_IFTYPE_STATION &&
 -          mvmvif->ap_sta_id == mvm_sta->sta_id) {
 +      if ((vif->type == NL80211_IFTYPE_STATION &&
 +           mvmvif->ap_sta_id == mvm_sta->sta_id) ||
 +          iwl_mvm_is_dqa_supported(mvm)){
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
                if (ret)
                        return ret;
                if (iwl_mvm_is_dqa_supported(mvm))
                        iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
  
 -              /* if we are associated - we can't remove the AP STA now */
 -              if (vif->bss_conf.assoc)
 -                      return ret;
 +              if (vif->type == NL80211_IFTYPE_STATION &&
 +                  mvmvif->ap_sta_id == mvm_sta->sta_id) {
 +                      /* if associated - we can't remove the AP STA now */
 +                      if (vif->bss_conf.assoc)
 +                              return ret;
  
 -              /* unassoc - go ahead - remove the AP STA now */
 -              mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 +                      /* unassoc - go ahead - remove the AP STA now */
 +                      mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
  
 -              /* clear d0i3_ap_sta_id if no longer relevant */
 -              if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
 -                      mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 +                      /* clear d0i3_ap_sta_id if no longer relevant */
 +                      if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
 +                              mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 +              }
        }
  
        /*
        } else {
                spin_unlock_bh(&mvm_sta->lock);
  
 -              if (sta->tdls)
 +              if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
                        iwl_mvm_tdls_sta_deinit(mvm, sta);
  
                ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
@@@ -1421,9 -983,8 +1421,9 @@@ int iwl_mvm_add_aux_sta(struct iwl_mvm 
        lockdep_assert_held(&mvm->mutex);
  
        /* Map Aux queue to fifo - needs to happen before adding Aux station */
 -      iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
 -                            IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 +      if (!iwl_mvm_is_dqa_supported(mvm))
 +              iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
 +                                    IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
  
        /* Allocate aux station and assign to it the aux queue */
        ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
        if (ret)
                return ret;
  
 +      if (iwl_mvm_is_dqa_supported(mvm)) {
 +              struct iwl_trans_txq_scd_cfg cfg = {
 +                      .fifo = IWL_MVM_TX_FIFO_MCAST,
 +                      .sta_id = mvm->aux_sta.sta_id,
 +                      .tid = IWL_MAX_TID_COUNT,
 +                      .aggregate = false,
 +                      .frame_limit = IWL_FRAME_LIMIT,
 +              };
 +
 +              iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
 +                                 wdg_timeout);
 +      }
 +
        ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
                                         MAC_INDEX_AUX, 0);
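
In DQA mode the aux queue is enabled only after the aux station exists, using a scheduler configuration descriptor filled with designated initializers, as the hunk above shows. The snippet below illustrates that configuration-struct pattern in isolation; struct txq_cfg, enable_txq() and the values used are stand-ins, not the iwl_trans API.

/* Sketch: describe a queue with a small config struct, then enable it. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct txq_cfg {
	uint8_t  fifo;
	uint8_t  sta_id;
	uint8_t  tid;
	bool     aggregate;
	uint16_t frame_limit;
};

static void enable_txq(int queue, const struct txq_cfg *cfg, unsigned int wdg_timeout)
{
	printf("queue %d -> fifo %d, sta %d, tid %d, agg %d, limit %d, wdg %u\n",
	       queue, cfg->fifo, cfg->sta_id, cfg->tid,
	       cfg->aggregate, cfg->frame_limit, wdg_timeout);
}

int main(void)
{
	struct txq_cfg cfg = {
		.fifo        = 5,	/* multicast FIFO in this sketch */
		.sta_id      = 16,	/* aux station id, illustrative */
		.tid         = 15,	/* "no TID" marker, illustrative */
		.aggregate   = false,
		.frame_limit = 64,
	};

	enable_txq(1, &cfg, 10000);
	return 0;
}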
  
@@@ -1768,8 -1316,8 +1768,8 @@@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *
  
        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
 -              IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
 -                             start ? "start" : "stopp");
 +              IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
 +                           start ? "start" : "stopp");
                break;
        case ADD_STA_IMMEDIATE_BA_FAILURE:
                IWL_WARN(mvm, "RX BA Session refused by fw\n");
                 * supposed to happen) and we will free the session data while
                 * RX is being processed in parallel
                 */
 +              IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
 +                           mvm_sta->sta_id, tid, baid);
                WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
                rcu_assign_pointer(mvm->baid_map[baid], baid_data);
 -      } else if (mvm->rx_ba_sessions > 0) {
 +      } else {
                u8 baid = mvm_sta->tid_to_baid[tid];
  
 -              /* check that restart flow didn't zero the counter */
 -              mvm->rx_ba_sessions--;
 +              if (mvm->rx_ba_sessions > 0)
 +                      /* check that restart flow didn't zero the counter */
 +                      mvm->rx_ba_sessions--;
                if (!iwl_mvm_has_new_rx_api(mvm))
                        return 0;
  
                del_timer_sync(&baid_data->session_timer);
                RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
                kfree_rcu(baid_data, rcu_head);
 +              IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
        }
        return 0;
  
@@@ -1858,8 -1402,8 +1858,8 @@@ out_free
        return ret;
  }
  
 -static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 -                            int tid, u8 queue, bool start)
 +int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 +                     int tid, u8 queue, bool start)
  {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
@@@ -1914,7 -1458,6 +1914,7 @@@ const u8 tid_to_mac80211_ac[] = 
        IEEE80211_AC_VI,
        IEEE80211_AC_VO,
        IEEE80211_AC_VO,
 +      IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
  };
  
  static const u8 tid_to_ucode_ac[] = {
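
The table gains a ninth entry so that management frames, handled internally as pseudo-TID 8, map to the voice access class. The sketch below shows how such a table is typically consulted; the enum values and the full table contents are local stand-ins, since only the tail of the real array is visible in the hunk.

/* Sketch: bounds-checked TID-to-access-class lookup. */
#include <stdio.h>

enum ac { AC_BK, AC_BE, AC_VI, AC_VO };

#define MGMT_TID 8	/* management frames handled as a pseudo-TID */

static const enum ac tid_to_ac[] = {
	AC_BE, AC_BK, AC_BK, AC_BE,	/* TIDs 0-3 */
	AC_VI, AC_VI, AC_VO, AC_VO,	/* TIDs 4-7 */
	AC_VO,				/* TID 8: management, voice class */
};

static enum ac lookup_ac(unsigned int tid)
{
	if (tid >= sizeof(tid_to_ac) / sizeof(tid_to_ac[0]))
		return AC_BE;		/* defensive default for bogus TIDs */
	return tid_to_ac[tid];
}

int main(void)
{
	printf("TID %d -> AC %d\n", MGMT_TID, lookup_ac(MGMT_TID));
	return 0;
}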
@@@ -1969,8 -1512,7 +1969,8 @@@ int iwl_mvm_sta_tx_agg_start(struct iwl
        txq_id = mvmsta->tid_data[tid].txq_id;
        if (!iwl_mvm_is_dqa_supported(mvm) ||
            mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 -              txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
 +              txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 +                                               mvm->first_agg_queue,
                                                 mvm->last_agg_queue);
                if (txq_id < 0) {
                        ret = txq_id;
@@@ -2310,12 -1852,18 +2310,18 @@@ static struct iwl_mvm_sta *iwl_mvm_get_
            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                u8 sta_id = mvmvif->ap_sta_id;
  
+               sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+                                           lockdep_is_held(&mvm->mutex));
                /*
                 * It is possible that the 'sta' parameter is NULL,
                 * for example when a GTK is removed - the sta_id will then
                 * be the AP ID, and no station was passed by mac80211.
                 */
-               return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+               if (IS_ERR_OR_NULL(sta))
+                       return NULL;
+               return iwl_mvm_sta_from_mac80211(sta);
        }
  
        return NULL;
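
The hunk above replaces the helper lookup with an explicit rcu_dereference_check() guarded by lockdep_is_held(&mvm->mutex) plus an IS_ERR_OR_NULL() test, so a slot holding NULL or an error-encoded pointer is rejected before use. The user-space sketch below illustrates only that "look up, then reject NULL and error pointers" convention; err_ptr() and is_err_or_null() are simplified stand-ins for the kernel macros, and there is no RCU here.

/* Sketch: a table slot may be valid, NULL, or an error-encoded pointer. */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err) { return (void *)err; }
static inline int is_err_or_null(const void *p)
{
	return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct sta { int id; };

static struct sta *table[4];

static struct sta *get_sta(int idx)
{
	struct sta *sta = table[idx];

	if (is_err_or_null(sta))
		return NULL;	/* removed, never added, or poisoned */
	return sta;
}

int main(void)
{
	struct sta real = { .id = 2 };

	table[0] = &real;
	table[1] = NULL;
	table[2] = err_ptr(-22);	/* e.g. a slot poisoned with -EINVAL */

	for (int i = 0; i < 3; i++)
		printf("slot %d -> %s\n", i, get_sta(i) ? "valid sta" : "skip");
	return 0;
}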
@@@ -2359,13 -1907,6 +2365,13 @@@ static int iwl_mvm_send_sta_key(struct 
                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
                memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
                break;
 +      case WLAN_CIPHER_SUITE_GCMP_256:
 +              key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
 +              /* fall through */
 +      case WLAN_CIPHER_SUITE_GCMP:
 +              key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
 +              memcpy(cmd.key, keyconf->key, keyconf->keylen);
 +              break;
        default:
                key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
                memcpy(cmd.key, keyconf->key, keyconf->keylen);
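
GCMP-256 sets the extra 32-byte-key flag and then deliberately falls through to the shared GCMP handling that sets the GCMP flag and copies the key, as the hunk shows. Below is a standalone sketch of that fall-through shape; the cipher ids and flag bits are local stand-ins, not the firmware's key-flag encoding.

/* Sketch: 256-bit variant adds a flag, then shares the common path. */
#include <stdint.h>
#include <stdio.h>

enum cipher { CIPHER_GCMP, CIPHER_GCMP_256, CIPHER_OTHER };

#define KEY_FLG_GCMP      0x01
#define KEY_FLG_32BYTES   0x02
#define KEY_FLG_EXT       0x04

static uint16_t key_flags_for(enum cipher c)
{
	uint16_t flags = 0;

	switch (c) {
	case CIPHER_GCMP_256:
		flags |= KEY_FLG_32BYTES;
		/* fall through: the rest is identical to GCMP */
	case CIPHER_GCMP:
		flags |= KEY_FLG_GCMP;
		break;
	default:
		flags |= KEY_FLG_EXT;
		break;
	}
	return flags;
}

int main(void)
{
	printf("GCMP:     0x%02x\n", key_flags_for(CIPHER_GCMP));
	printf("GCMP-256: 0x%02x\n", key_flags_for(CIPHER_GCMP_256));
	return 0;
}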
@@@ -2420,6 -1961,14 +2426,14 @@@ static int iwl_mvm_send_sta_igtk(struc
                struct ieee80211_key_seq seq;
                const u8 *pn;
  
+               switch (keyconf->cipher) {
+               case WLAN_CIPHER_SUITE_AES_CMAC:
+                       igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+                       break;
+               default:
+                       return -EINVAL;
+               }
                memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
@@@ -2486,8 -2035,6 +2500,8 @@@ static int __iwl_mvm_set_sta_key(struc
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
 +      case WLAN_CIPHER_SUITE_GCMP:
 +      case WLAN_CIPHER_SUITE_GCMP_256:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
                                           0, NULL, 0, key_offset);
                break;
@@@ -622,8 -622,13 +622,8 @@@ static int rtl8192eu_parse_efuse(struc
                dev_info(&priv->udev->dev,
                         "%s: dumping efuse (0x%02zx bytes):\n",
                         __func__, sizeof(struct rtl8192eu_efuse));
 -              for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
 -                      dev_info(&priv->udev->dev, "%02x: "
 -                               "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
 -                               raw[i], raw[i + 1], raw[i + 2],
 -                               raw[i + 3], raw[i + 4], raw[i + 5],
 -                               raw[i + 6], raw[i + 7]);
 -              }
 +              for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8)
 +                      dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
        }
        return 0;
  }
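
The rtl8xxxu hunk above replaces the hand-rolled eight-byte format string with the kernel's %8ph specifier, which prints a run of bytes as space-separated hex. Plain printf has no %ph, so the user-space sketch below builds equivalent output with a small helper; the sample efuse bytes are made up.

/* Sketch: print 8-byte rows as "offset: xx xx xx xx xx xx xx xx". */
#include <stdio.h>
#include <stddef.h>

static void dump_row(size_t offset, const unsigned char *row)
{
	printf("%02zx:", offset);
	for (int i = 0; i < 8; i++)
		printf(" %02x", row[i]);
	printf("\n");
}

int main(void)
{
	unsigned char raw[16] = { 0x29, 0x81, 0x10, 0x0c, 0x00, 0x00, 0x00, 0x00,
				  0x52, 0x54, 0x4c, 0x38, 0x31, 0x39, 0x32, 0x45 };

	for (size_t i = 0; i < sizeof(raw); i += 8)
		dump_row(i, &raw[i]);
	return 0;
}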
@@@ -1144,7 -1149,7 +1144,7 @@@ static void rtl8192eu_phy_iqcalibrate(s
  
                for (i = 0; i < retry; i++) {
                        path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
-                       if (path_a_ok == 0x03) {
+                       if (path_b_ok == 0x03) {
                                val32 = rtl8xxxu_read32(priv,
                                                        REG_RX_POWER_BEFORE_IQK_B_2);
                                result[t][6] = (val32 >> 16) & 0x3ff;
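
The one-character fix above makes the path B retry loop test its own status, path_b_ok, instead of the leftover path_a_ok from the previous loop. A standalone sketch of the intended loop shape follows; calibrate_path_b() is a stub standing in for the RX IQK routine.

/* Sketch: each calibration path checks its own result. */
#include <stdio.h>

static int calibrate_path_b(int attempt)
{
	return attempt == 2 ? 0x03 : 0x00;	/* succeeds on the third try */
}

int main(void)
{
	const int retry = 3;
	int path_b_ok = 0;

	for (int i = 0; i < retry; i++) {
		path_b_ok = calibrate_path_b(i);
		if (path_b_ok == 0x03) {	/* check path B's own result */
			printf("path B calibrated on attempt %d\n", i + 1);
			break;
		}
	}

	if (path_b_ok != 0x03)
		printf("path B calibration failed\n");
	return 0;
}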
@@@ -1244,9 -1249,11 +1244,9 @@@ static void rtl8192eu_phy_iq_calibrate(
                reg_e94 = result[i][0];
                reg_e9c = result[i][1];
                reg_ea4 = result[i][2];
 -              reg_eac = result[i][3];
                reg_eb4 = result[i][4];
                reg_ebc = result[i][5];
                reg_ec4 = result[i][6];
 -              reg_ecc = result[i][7];
        }
  
        if (candidate >= 0) {