Merge tag 'iwlwifi-next-for-kalle-2016-03-30' of https://git.kernel.org/pub/scm/linux...
authorKalle Valo <kvalo@codeaurora.org>
Wed, 6 Apr 2016 18:16:01 +0000 (21:16 +0300)
committerKalle Valo <kvalo@codeaurora.org>
Wed, 6 Apr 2016 18:16:01 +0000 (21:16 +0300)
* Support for Link Quality measurement (Aviya)
* Improvements in thermal (Chaya Rachel)
* Various cleanups (many people)
* Improvements in firmware error dump (Golan)
* More work on 9000 devices and MSIx (Haim)
* Continuation of the Dynamic Queue Allocation work (Liad)
* Scan timeout to cope with buggy firmware (Luca)
* D0i3 improvements (Luca)
* Make the paging less memory hungry (Matti)
* 9000 new Rx path (Sara)

47 files changed:
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/iwl-1000.c
drivers/net/wireless/intel/iwlwifi/iwl-2000.c
drivers/net/wireless/intel/iwlwifi/iwl-5000.c
drivers/net/wireless/intel/iwlwifi/iwl-6000.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-fw.h
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/Makefile
drivers/net/wireless/intel/iwlwifi/mvm/coex.c
drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c [deleted file]
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sf.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c

index 16c4f38..492035f 100644 (file)
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
          If unsure, don't enable this option, as some programs might
          expect incoming broadcasts for their normal operations.
 
-config IWLWIFI_UAPSD
-       bool "enable U-APSD by default"
-       depends on IWLMVM
-       help
-         Say Y here to enable U-APSD by default. This may cause
-         interoperability problems with some APs, manifesting in lower than
-         expected throughput due to those APs not enabling aggregation
-
-         If unsure, say N.
-
 config IWLWIFI_PCIE_RTPM
        bool "Enable runtime power management mode for PCIe devices"
        depends on IWLMVM && PM
index 8562812..6147162 100644 (file)
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
 
 static void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
-       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+       priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
 
        INIT_WORK(&priv->restart, iwl_bg_restart);
        INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
index a90dbab..ef22c3d 100644 (file)
 #define IWL1000_UCODE_API_MAX 5
 #define IWL100_UCODE_API_MAX 5
 
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
 /* Lowest firmware API version supported */
 #define IWL1000_UCODE_API_MIN 1
 #define IWL100_UCODE_API_MIN 5
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
 #define IWL_DEVICE_1000                                                \
        .fw_name_pre = IWL1000_FW_PRE,                          \
        .ucode_api_max = IWL1000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL1000_UCODE_API_OK,                   \
        .ucode_api_min = IWL1000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_1000,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
 #define IWL_DEVICE_100                                         \
        .fw_name_pre = IWL100_FW_PRE,                           \
        .ucode_api_max = IWL100_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL100_UCODE_API_OK,                    \
        .ucode_api_min = IWL100_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_100,                 \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -136,5 +130,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
        IWL_DEVICE_100,
 };
 
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
index a6da959..dc246c9 100644 (file)
 #define IWL105_UCODE_API_MAX 6
 #define IWL135_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL2030_UCODE_API_MIN 5
 #define IWL2000_UCODE_API_MIN 5
@@ -114,7 +108,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
 #define IWL_DEVICE_2000                                                \
        .fw_name_pre = IWL2000_FW_PRE,                          \
        .ucode_api_max = IWL2000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL2000_UCODE_API_OK,                   \
        .ucode_api_min = IWL2000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_2000,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -142,7 +135,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 #define IWL_DEVICE_2030                                                \
        .fw_name_pre = IWL2030_FW_PRE,                          \
        .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL2030_UCODE_API_OK,                   \
        .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_2030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -163,7 +155,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 #define IWL_DEVICE_105                                         \
        .fw_name_pre = IWL105_FW_PRE,                           \
        .ucode_api_max = IWL105_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL105_UCODE_API_OK,                    \
        .ucode_api_min = IWL105_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_105,                 \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -191,7 +182,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 #define IWL_DEVICE_135                                         \
        .fw_name_pre = IWL135_FW_PRE,                           \
        .ucode_api_max = IWL135_UCODE_API_MAX,                  \
-       .ucode_api_ok = IWL135_UCODE_API_OK,                    \
        .ucode_api_min = IWL135_UCODE_API_MIN,                  \
        .device_family = IWL_DEVICE_FAMILY_135,                 \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -210,7 +200,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
        .ht_params = &iwl2000_ht_params,
 };
 
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
index 8b5afde..4dcdab6 100644 (file)
 #define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
 /* Lowest firmware API version supported */
 #define IWL5000_UCODE_API_MIN 1
 #define IWL5150_UCODE_API_MIN 1
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
 #define IWL_DEVICE_5000                                                \
        .fw_name_pre = IWL5000_FW_PRE,                          \
        .ucode_api_max = IWL5000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL5000_UCODE_API_OK,                   \
        .ucode_api_min = IWL5000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_5000,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
        .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
        .fw_name_pre = IWL5000_FW_PRE,
        .ucode_api_max = IWL5000_UCODE_API_MAX,
-       .ucode_api_ok = IWL5000_UCODE_API_OK,
        .ucode_api_min = IWL5000_UCODE_API_MIN,
        .device_family = IWL_DEVICE_FAMILY_5000,
        .max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 #define IWL_DEVICE_5150                                                \
        .fw_name_pre = IWL5150_FW_PRE,                          \
        .ucode_api_max = IWL5150_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL5150_UCODE_API_OK,                   \
        .ucode_api_min = IWL5150_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_5150,                \
        .max_inst_size = IWLAGN_RTC_INST_SIZE,                  \
@@ -174,5 +167,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
        IWL_DEVICE_5150,
 };
 
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
index 0b4ba78..9938f53 100644 (file)
 #define IWL6000G2_UCODE_API_MAX 6
 #define IWL6035_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
@@ -136,7 +129,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
 #define IWL_DEVICE_6005                                                \
        .fw_name_pre = IWL6005_FW_PRE,                          \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
-       .ucode_api_ok = IWL6000G2_UCODE_API_OK,                 \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
        .device_family = IWL_DEVICE_FAMILY_6005,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -191,7 +183,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
 #define IWL_DEVICE_6030                                                \
        .fw_name_pre = IWL6030_FW_PRE,                          \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
-       .ucode_api_ok = IWL6000G2B_UCODE_API_OK,                \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
        .device_family = IWL_DEVICE_FAMILY_6030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -228,7 +219,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
 #define IWL_DEVICE_6035                                                \
        .fw_name_pre = IWL6030_FW_PRE,                          \
        .ucode_api_max = IWL6035_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL6035_UCODE_API_OK,                   \
        .ucode_api_min = IWL6035_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_6030,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -282,7 +272,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
 #define IWL_DEVICE_6000i                                       \
        .fw_name_pre = IWL6000_FW_PRE,                          \
        .ucode_api_max = IWL6000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL6000_UCODE_API_OK,                   \
        .ucode_api_min = IWL6000_UCODE_API_MIN,                 \
        .device_family = IWL_DEVICE_FAMILY_6000i,               \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
@@ -370,7 +359,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
        .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
        .fw_name_pre = IWL6000_FW_PRE,
        .ucode_api_max = IWL6000_UCODE_API_MAX,
-       .ucode_api_ok = IWL6000_UCODE_API_OK,
        .ucode_api_min = IWL6000_UCODE_API_MIN,
        .device_family = IWL_DEVICE_FAMILY_6000,
        .max_inst_size = IWL60_RTC_INST_SIZE,
@@ -383,7 +371,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
        .led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
index fc475ce..b6283c8 100644 (file)
 #define IWL7265D_UCODE_API_MAX 21
 #define IWL3168_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   13
-#define IWL7265_UCODE_API_OK   13
-#define IWL7265D_UCODE_API_OK  13
-#define IWL3168_UCODE_API_OK   20
-
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  13
-#define IWL7265_UCODE_API_MIN  13
-#define IWL7265D_UCODE_API_MIN 13
+#define IWL7260_UCODE_API_MIN  16
+#define IWL7265_UCODE_API_MIN  16
+#define IWL7265D_UCODE_API_MIN 16
 #define IWL3168_UCODE_API_MIN  20
 
 /* NVM versions */
@@ -179,25 +173,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
 #define IWL_DEVICE_7000                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7260_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL7260_UCODE_API_OK,                   \
        .ucode_api_min = IWL7260_UCODE_API_MIN
 
 #define IWL_DEVICE_7005                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL7265_UCODE_API_OK,                   \
        .ucode_api_min = IWL7265_UCODE_API_MIN
 
 #define IWL_DEVICE_3008                                                \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL3168_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL3168_UCODE_API_OK,                   \
        .ucode_api_min = IWL3168_UCODE_API_MIN
 
 #define IWL_DEVICE_7005D                                       \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265D_UCODE_API_MAX,                \
-       .ucode_api_ok = IWL7265D_UCODE_API_OK,                  \
        .ucode_api_min = IWL7265D_UCODE_API_MIN
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -388,8 +378,8 @@ const struct iwl_cfg iwl7265d_n_cfg = {
        .dccm_len = IWL7265_DCCM_LEN,
 };
 
-MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
index 97be104..0728a28 100644 (file)
 #define IWL8000_UCODE_API_MAX  21
 #define IWL8265_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK   13
-#define IWL8265_UCODE_API_OK   20
-
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  13
+#define IWL8000_UCODE_API_MIN  16
 #define IWL8265_UCODE_API_MIN  20
 
 /* NVM versions */
@@ -175,19 +171,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
 #define IWL_DEVICE_8000                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
        .ucode_api_min = IWL8000_UCODE_API_MIN                          \
 
 #define IWL_DEVICE_8260                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
        .ucode_api_min = IWL8000_UCODE_API_MIN                          \
 
 #define IWL_DEVICE_8265                                                        \
        IWL_DEVICE_8000_COMMON,                                         \
        .ucode_api_max = IWL8265_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL8265_UCODE_API_OK,                           \
        .ucode_api_min = IWL8265_UCODE_API_MIN                          \
 
 const struct iwl_cfg iwl8260_2n_cfg = {
@@ -259,5 +252,5 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
 };
 
-MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
index 318b1dc..a3d35aa 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 /* Highest firmware API version supported */
 #define IWL9000_UCODE_API_MAX  21
 
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK   13
-
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN  13
+#define IWL9000_UCODE_API_MIN  16
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION            0x0a1d
@@ -122,7 +119,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 
 #define IWL_DEVICE_9000                                                        \
        .ucode_api_max = IWL9000_UCODE_API_MAX,                         \
-       .ucode_api_ok = IWL9000_UCODE_API_OK,                           \
        .ucode_api_min = IWL9000_UCODE_API_MIN,                         \
        .device_family = IWL_DEVICE_FAMILY_8000,                        \
        .max_inst_size = IWL60_RTC_INST_SIZE,                           \
@@ -137,14 +133,15 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .dccm2_len = IWL9000_DCCM2_LEN,                                 \
        .smem_offset = IWL9000_SMEM_OFFSET,                             \
        .smem_len = IWL9000_SMEM_LEN,                                   \
+       .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,           \
        .thermal_params = &iwl9000_tt_params,                           \
        .apmg_not_supported = true,                                     \
        .mq_rx_supported = true,                                        \
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true
 
-const struct iwl_cfg iwl9260_2ac_cfg = {
-               .name = "Intel(R) Dual Band Wireless AC 9260",
+const struct iwl_cfg iwl9560_2ac_cfg = {
+               .name = "Intel(R) Dual Band Wireless AC 9560",
                .fw_name_pre = IWL9000_FW_PRE,
                IWL_DEVICE_9000,
                .ht_params = &iwl9000_ht_params,
@@ -163,4 +160,4 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
                .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index 3e4d346..08bb4f4 100644 (file)
@@ -131,6 +131,8 @@ enum iwl_led_mode {
 #define IWL_MAX_WD_TIMEOUT     120000
 
 #define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+                                NETIF_F_TSO | NETIF_F_TSO6)
 
 /* Antenna presence definitions */
 #define        ANT_NONE        0x0
@@ -277,8 +279,6 @@ struct iwl_pwr_tx_backoff {
  *     (.ucode) will be added to filename before loading from disk. The
  *     filename is constructed as fw_name_pre<api>.ucode.
  * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *     without a warning, for use in transitions
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
  * @max_data_size: The maximal length of the fw data section
@@ -324,7 +324,6 @@ struct iwl_cfg {
        const char *name;
        const char *fw_name_pre;
        const unsigned int ucode_api_max;
-       const unsigned int ucode_api_ok;
        const unsigned int ucode_api_min;
        const enum iwl_device_family device_family;
        const u32 max_data_size;
@@ -439,7 +438,7 @@ extern const struct iwl_cfg iwl8265_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwl5165_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
 
index f899666..48e8737 100644 (file)
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
                kfree(drv->fw.dbg_conf_tlv[i]);
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
                kfree(drv->fw.dbg_trigger_tlv[i]);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+               kfree(drv->fw.dbg_mem_tlv[i]);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -297,6 +299,7 @@ struct iwl_firmware_pieces {
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
 };
 
 /*
@@ -1041,6 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
                        gscan_capa = true;
                        break;
+               case IWL_UCODE_TLV_FW_MEM_SEG: {
+                       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+                               (void *)tlv_data;
+                       u32 type;
+
+                       if (tlv_len != (sizeof(*dbg_mem)))
+                               goto invalid_tlv_len;
+
+                       type = le32_to_cpu(dbg_mem->data_type);
+                       drv->fw.dbg_dynamic_mem = true;
+
+                       if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
+                               IWL_ERR(drv,
+                                       "Skip unknown dbg mem segment: %u\n",
+                                       dbg_mem->data_type);
+                               break;
+                       }
+
+                       if (pieces->dbg_mem_tlv[type]) {
+                               IWL_ERR(drv,
+                                       "Ignore duplicate mem segment: %u\n",
+                                       dbg_mem->data_type);
+                               break;
+                       }
+
+                       IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+                                      dbg_mem->data_type);
+
+                       pieces->dbg_mem_tlv[type] = dbg_mem;
+                       break;
+                       }
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
@@ -1060,11 +1094,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
-       if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-                !gscan_capa,
-                "GSCAN is supported but capabilities TLV is unavailable\n"))
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, or if it has an old format,
+        * warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           !gscan_capa) {
+               IWL_DEBUG_INFO(drv,
+                              "GSCAN is supported but capabilities TLV is unavailable\n");
                __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
                            capa->_capa);
+       }
 
        return 0;
 
@@ -1199,7 +1240,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        int err;
        struct iwl_firmware_pieces *pieces;
        const unsigned int api_max = drv->cfg->ucode_api_max;
-       unsigned int api_ok = drv->cfg->ucode_api_ok;
        const unsigned int api_min = drv->cfg->ucode_api_min;
        size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
        u32 api_ver;
@@ -1212,20 +1252,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                        IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
        fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
 
-       if (!api_ok)
-               api_ok = api_max;
-
        pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
        if (!pieces)
                return;
 
-       if (!ucode_raw) {
-               if (drv->fw_index <= api_ok)
-                       IWL_ERR(drv,
-                               "request for firmware file '%s' failed.\n",
-                               drv->firmware_name);
+       if (!ucode_raw)
                goto try_again;
-       }
 
        IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
                       drv->firmware_name, ucode_raw->size);
@@ -1248,10 +1280,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        if (err)
                goto try_again;
 
-       if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
-               api_ver = drv->fw.ucode_ver;
-       else
-               api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+       api_ver = drv->fw.ucode_ver;
 
        /*
         * api_ver should match the api version forming part of the
@@ -1267,19 +1296,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                api_max, api_ver);
                        goto try_again;
                }
-
-               if (api_ver < api_ok) {
-                       if (api_ok != api_max)
-                               IWL_ERR(drv, "Firmware has old API version, "
-                                       "expected v%u through v%u, got v%u.\n",
-                                       api_ok, api_max, api_ver);
-                       else
-                               IWL_ERR(drv, "Firmware has old API version, "
-                                       "expected v%u, got v%u.\n",
-                                       api_max, api_ver);
-                       IWL_ERR(drv, "New firmware can be obtained from "
-                                     "http://www.intellinuxwireless.org/.\n");
-               }
        }
 
        /*
@@ -1368,6 +1384,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+               if (pieces->dbg_mem_tlv[i]) {
+                       drv->fw.dbg_mem_tlv[i] =
+                               kmemdup(pieces->dbg_mem_tlv[i],
+                                       sizeof(*drv->fw.dbg_mem_tlv[i]),
+                                       GFP_KERNEL);
+                       if (!drv->fw.dbg_mem_tlv[i])
+                               goto out_free_fw;
+               }
+       }
+
        /* Now that we can no longer fail, copy information */
 
        /*
@@ -1560,9 +1587,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
        .power_level = IWL_POWER_INDEX_1,
        .d0i3_disable = true,
        .d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
-       .uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+       .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
        /* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1681,12 +1706,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
 MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
 
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
-                  bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+                  uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+                "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
index 8425e1a..09b7ea2 100644 (file)
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_RB = 11,
        IWL_FW_ERROR_DUMP_PAGING = 12,
        IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+       IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
index 15ec4e2..843232b 100644 (file)
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
        IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
        IWL_UCODE_TLV_FW_GSCAN_CAPA     = 50,
+       IWL_UCODE_TLV_FW_MEM_SEG        = 51,
 };
 
 struct iwl_ucode_tlv {
@@ -245,13 +246,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 
 /**
  * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
- * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
  *     instead of 3.
  * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
@@ -260,12 +259,10 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
-       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = (__force iwl_ucode_tlv_api_t)8,
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
        IWL_UCODE_TLV_API_WIDE_CMD_HDR          = (__force iwl_ucode_tlv_api_t)14,
        IWL_UCODE_TLV_API_LQ_SS_PARAMS          = (__force iwl_ucode_tlv_api_t)18,
-       IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
        IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
        IWL_UCODE_TLV_API_TX_POWER_CHAIN        = (__force iwl_ucode_tlv_api_t)27,
 
@@ -324,6 +321,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
  * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
  *     regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ *     memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -361,6 +361,8 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT      = (__force iwl_ucode_tlv_capa_t)75,
        IWL_UCODE_TLV_CAPA_CTDP_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)76,
        IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED             = (__force iwl_ucode_tlv_capa_t)77,
+       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG        = (__force iwl_ucode_tlv_capa_t)80,
+       IWL_UCODE_TLV_CAPA_LQM_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)81,
 
        NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
@@ -490,6 +492,37 @@ enum iwl_fw_dbg_monitor_mode {
        MIPI_MODE = 3,
 };
 
+/**
+ * enum iwl_fw_dbg_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
+ * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * @FW_DBG_MEM_SMEM: the data type is SMEM
+ */
+enum iwl_fw_dbg_mem_seg_type {
+       FW_DBG_MEM_DCCM_LMAC = 0,
+       FW_DBG_MEM_DCCM_UMAC,
+       FW_DBG_MEM_SMEM,
+
+       /* Must be last */
+       FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: &enum iwl_fw_dbg_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+       __le32 data_type;
+       __le32 ofs;
+       __le32 len;
+} __packed;
+
 /**
  * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
  *
index 2942571..e461d63 100644 (file)
@@ -286,6 +286,8 @@ struct iwl_fw {
        struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+       bool dbg_dynamic_mem;
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
        struct iwl_gscan_capabilities gscan_capa;
index d1a5dd1..6c5c2f9 100644 (file)
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
        IWL_AMSDU_12K = 2,
 };
 
+enum iwl_uapsd_disable {
+       IWL_DISABLE_UAPSD_BSS           = BIT(0),
+       IWL_DISABLE_UAPSD_P2P_CLIENT    = BIT(1),
+};
+
 /**
  * struct iwl_mod_params
  *
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
  * @nvm_file: specifies a external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ *     IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
  * @d0i3_disable: disable d0i3, default = 1,
  * @d0i3_entry_delay: time to wait after no refs are taken before
  *     entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
 #endif
        int ant_coupling;
        char *nvm_file;
-       bool uapsd_disable;
+       u32 uapsd_disable;
        bool d0i3_disable;
        unsigned int d0i3_entry_delay;
        bool lar_disable;
index c46e596..6c1d20d 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
 #define TXF_READ_MODIFY_DATA           (0xa00448)
 #define TXF_READ_MODIFY_ADDR           (0xa0044c)
 
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT         (0xA00538)
+#define TXF_CPU2_WR_PTR                (0xA00514)
+#define TXF_CPU2_RD_PTR                (0xA00510)
+#define TXF_CPU2_FENCE_PTR             (0xA00518)
+#define TXF_CPU2_LOCK_FENCE            (0xA00524)
+#define TXF_CPU2_NUM                   (0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA      (0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR      (0xA0054C)
+
 /* Radio registers access */
 #define RSP_RADIO_CMD                  (0xa02804)
 #define RSP_RADIO_RDDAT                        (0xa02814)
index 91d74b3..fa4ab4b 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
 
 struct iwl_trans_txq_scd_cfg {
        u8 fifo;
-       s8 sta_id;
+       u8 sta_id;
        u8 tid;
        bool aggregate;
        int frame_limit;
index 23e7e29..2e06dfc 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
index 2e098f8..35cdeca 100644 (file)
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        struct iwl_bt_coex_cmd bt_cmd = {};
        u32 mode;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_send_bt_init_conf_old(mvm);
-
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
-               return;
-       }
-
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
        IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
-               return;
-       }
-
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
                return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
                return true;
 
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
        if (ant & mvm->cfg->non_shared_ant)
                return true;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
                BT_HIGH_TRAFFIC;
 }
@@ -877,9 +853,6 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
        if (mvm->cfg->bt_shared_single_ant)
                return true;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
@@ -888,9 +861,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 {
        u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
        if (band != IEEE80211_BAND_2GHZ)
                return false;
 
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_bt_coex_vif_change_old(mvm);
-               return;
-       }
-
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
-               return;
-       }
-
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
                return;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644 (file)
index 0150457..0000000
+++ /dev/null
@@ -1,1315 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "fw-api-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
-       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
-                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
-                      BT_COEX_PRIO_TBL_DISABLED, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
-       0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
-                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
-                                   &iwl_bt_prio_tbl);
-}
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0), /* 50% */
-       cpu_to_le32(0xc0c0c0c0), /* 25% */
-       cpu_to_le32(0xfcfcfcfc), /* 75% */
-       cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               /* Tight */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0x00004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Loose */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Tx Tx disabled */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xeeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-/* 20MHz / 40MHz below / 40Mhz above*/
-static const __le64 iwl_ci_mask[][3] = {
-       /* dummy entry for channel 0 */
-       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
-       {
-               cpu_to_le64(0x0000001FFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000000FFFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000003FFFCULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-       },
-       {
-               cpu_to_le64(0x00001FFFE0ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-       },
-       {
-               cpu_to_le64(0x00007FFF80ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-       },
-       {
-               cpu_to_le64(0x0003FFFC00ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-       },
-       {
-               cpu_to_le64(0x000FFFF000ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-       },
-       {
-               cpu_to_le64(0x007FFF8000ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-       },
-       {
-               cpu_to_le64(0x01FFFE0000ULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-       },
-       {
-               cpu_to_le64(0x0FFFF00000ULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-               cpu_to_le64(0x0ULL),
-       },
-       {
-               cpu_to_le64(0x3FFFC00000ULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFFE000000ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFF8000000ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFC0000000ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0ULL)
-       },
-};
-
-enum iwl_bt_kill_msk {
-       BT_KILL_MSK_DEFAULT,
-       BT_KILL_MSK_NEVER,
-       BT_KILL_MSK_ALWAYS,
-       BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
-       [BT_KILL_MSK_NEVER] = 0xffffffff,
-       [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_NEVER,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_NEVER,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_ALWAYS,
-       },
-       {
-               BT_KILL_MSK_DEFAULT,
-               BT_KILL_MSK_ALWAYS,
-               BT_KILL_MSK_DEFAULT,
-       },
-};
-
-struct corunning_block_luts {
-       u8 range;
-       __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
-};
-
-/*
- * Ranges for the antenna coupling calibration / co-running block LUT:
- *             LUT0: [ 0, 12[
- *             LUT1: [12, 20[
- *             LUT2: [20, 21[
- *             LUT3: [21, 23[
- *             LUT4: [23, 27[
- *             LUT5: [27, 30[
- *             LUT6: [30, 32[
- *             LUT7: [32, 33[
- *             LUT8: [33, - [
- */
-static const struct corunning_block_luts antenna_coupling_ranges[] = {
-       {
-               .range = 0,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 12,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 20,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 21,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 23,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 27,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 30,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 32,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-       {
-               .range = 33,
-               .lut20 = {
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-               },
-       },
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum iwl_bt_coex_lut_type ret;
-       u16 phy_ctx_id;
-
-       /*
-        * Checking that we hold mvm->mutex is a good idea, but the rate
-        * control can't acquire the mutex since it runs in Tx path.
-        * So this is racy in that case, but in the worst case, the AMPDU
-        * size limit will be wrong for a short time which is not a big
-        * issue.
-        */
-
-       rcu_read_lock();
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return BT_COEX_INVALID_LUT;
-       }
-
-       ret = BT_COEX_TX_DIS_LUT;
-
-       if (mvm->cfg->bt_shared_single_ant) {
-               rcu_read_unlock();
-               return ret;
-       }
-
-       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
-       if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
-       else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
-       /* else - default = TX TX disallowed */
-
-       rcu_read_unlock();
-
-       return ret;
-}
-
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret;
-       u32 flags;
-
-       ret = iwl_send_bt_prio_tbl(mvm);
-       if (ret)
-               return ret;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-               switch (mvm->bt_force_ant_mode) {
-               case BT_FORCE_ANT_AUTO:
-                       flags = BT_COEX_AUTO_OLD;
-                       break;
-               case BT_FORCE_ANT_BT:
-                       flags = BT_COEX_BT_OLD;
-                       break;
-               case BT_FORCE_ANT_WIFI:
-                       flags = BT_COEX_WIFI_OLD;
-                       break;
-               default:
-                       WARN_ON(1);
-                       flags = 0;
-               }
-
-               bt_cmd->flags = cpu_to_le32(flags);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
-               goto send_cmd;
-       }
-
-       bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr =
-               IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
-       bt_cmd->bt4_tx_rx_max_freq0 = 15;
-       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
-       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
-       flags = iwlwifi_mod_params.bt_coex_active ?
-                       BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
-       bt_cmd->flags = cpu_to_le32(flags);
-
-       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
-                                           BT_VALID_BT_PRIO_BOOST |
-                                           BT_VALID_MAX_KILL |
-                                           BT_VALID_3W_TMRS |
-                                           BT_VALID_KILL_ACK |
-                                           BT_VALID_KILL_CTS |
-                                           BT_VALID_REDUCED_TX_POWER |
-                                           BT_VALID_LUT |
-                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
-                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
-                                           BT_VALID_ANT_ISOLATION |
-                                           BT_VALID_ANT_ISOLATION_THRS |
-                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
-                                           BT_VALID_TXRX_MAX_FREQ_0 |
-                                           BT_VALID_SYNC_TO_SCO |
-                                           BT_VALID_TTC |
-                                           BT_VALID_RRC);
-
-       if (IWL_MVM_BT_COEX_SYNC2SCO)
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
-
-       if (iwl_mvm_bt_is_plcr_supported(mvm)) {
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                    BT_VALID_CORUN_LUT_40);
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
-       }
-
-       if (IWL_MVM_BT_COEX_MPLUT) {
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
-       }
-
-       if (IWL_MVM_BT_COEX_TTC)
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
-
-       if (iwl_mvm_bt_is_rrc_supported(mvm))
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
-
-       if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-                      sizeof(iwl_single_shared_ant));
-       else
-               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-                      sizeof(iwl_combined_lookup));
-
-       /* Take first Co-running block LUT to get started */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
-
-       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
-              sizeof(iwl_bt_prio_boost));
-       bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
-       bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
-send_cmd:
-       memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
-       memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
-       u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
-       u32 ag = le32_to_cpu(notif->bt_activity_grading);
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       u8 ack_kill_msk, cts_kill_msk;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .data[0] = &bt_cmd,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret = 0;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
-       cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
-
-       if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
-           mvm->bt_cts_kill_msk[0] == cts_kill_msk)
-               return 0;
-
-       mvm->bt_ack_kill_msk[0] = ack_kill_msk;
-       mvm->bt_cts_kill_msk[0] = cts_kill_msk;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
-       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_KILL_ACK |
-                                            BT_VALID_KILL_CTS);
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
-                                      bool enable)
-{
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       /* Send ASYNC since this can be sent from an atomic context */
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_DUP, },
-               .flags = CMD_ASYNC,
-       };
-       struct iwl_mvm_sta *mvmsta;
-       int ret;
-
-       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-       if (!mvmsta)
-               return 0;
-
-       /* nothing to do */
-       if (mvmsta->bt_reduced_txpower == enable)
-               return 0;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-       bt_cmd->valid_bit_msk =
-               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
-       bt_cmd->bt_reduced_tx_power = sta_id;
-
-       if (enable)
-               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
-       IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
-                      enable ? "en" : "dis", sta_id);
-
-       mvmsta->bt_reduced_txpower = enable;
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-struct iwl_bt_iterator_data {
-       struct iwl_bt_coex_profile_notif_old *notif;
-       struct iwl_mvm *mvm;
-       struct ieee80211_chanctx_conf *primary;
-       struct ieee80211_chanctx_conf *secondary;
-       bool primary_ll;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif,
-                                      bool enable, int rssi)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       mvmvif->bf_data.last_bt_coex_event = rssi;
-       mvmvif->bf_data.bt_coex_max_thold =
-               enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
-       mvmvif->bf_data.bt_coex_min_thold =
-               enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
-                                     struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
-       u32 bt_activity_grading;
-       int ave_rssi;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               /* default smps_mode for BSS / P2P client is AUTOMATIC */
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-               break;
-       case NL80211_IFTYPE_AP:
-               if (!mvmvif->ap_ibss_active)
-                       return;
-               break;
-       default:
-               return;
-       }
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       /* If channel context is invalid or not on 2.4GHz .. */
-       if ((!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-               if (vif->type == NL80211_IFTYPE_STATION) {
-                       /* ... relax constraints and disable rssi events */
-                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                           smps_mode);
-                       iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                                   false);
-                       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               }
-               return;
-       }
-
-       bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
-       if (bt_activity_grading >= BT_HIGH_TRAFFIC)
-               smps_mode = IEEE80211_SMPS_STATIC;
-       else if (bt_activity_grading >= BT_LOW_TRAFFIC)
-               smps_mode = vif->type == NL80211_IFTYPE_AP ?
-                               IEEE80211_SMPS_OFF :
-                               IEEE80211_SMPS_DYNAMIC;
-
-       /* relax SMPS contraints for next association */
-       if (!vif->bss_conf.assoc)
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-       if (mvmvif->phy_ctxt &&
-           data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-       IWL_DEBUG_COEX(data->mvm,
-                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
-                      mvmvif->id, data->notif->bt_status, bt_activity_grading,
-                      smps_mode);
-
-       if (vif->type == NL80211_IFTYPE_STATION)
-               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                   smps_mode);
-
-       /* low latency is always primary */
-       if (iwl_mvm_vif_low_latency(mvmvif)) {
-               data->primary_ll = true;
-
-               data->secondary = data->primary;
-               data->primary = chanctx_conf;
-       }
-
-       if (vif->type == NL80211_IFTYPE_AP) {
-               if (!mvmvif->ap_ibss_active)
-                       return;
-
-               if (chanctx_conf == data->primary)
-                       return;
-
-               if (!data->primary_ll) {
-                       /*
-                        * downgrade the current primary no matter what its
-                        * type is.
-                        */
-                       data->secondary = data->primary;
-                       data->primary = chanctx_conf;
-               } else {
-                       /* there is low latency vif - we will be secondary */
-                       data->secondary = chanctx_conf;
-               }
-               return;
-       }
-
-       /*
-        * STA / P2P Client, try to be primary if first vif. If we are in low
-        * latency mode, we are already in primary and just don't do much
-        */
-       if (!data->primary || data->primary == chanctx_conf)
-               data->primary = chanctx_conf;
-       else if (!data->secondary)
-               /* if secondary is not NULL, it might be a GO */
-               data->secondary = chanctx_conf;
-
-       /*
-        * don't reduce the Tx power if one of these is true:
-        *  we are in LOOSE
-        *  single share antenna product
-        *  BT is active
-        *  we are associated
-        */
-       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-           mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
-           !data->notif->bt_status) {
-               iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* try to get the avg rssi from fw */
-       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
-       /* if the RSSI isn't valid, fake it is very low */
-       if (!ave_rssi)
-               ave_rssi = -100;
-       if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-       } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-       }
-
-       /* Begin to monitor the RSSI: it may influence the reduced Tx power */
-       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-               .notif = &mvm->last_bt_notif_old,
-       };
-       struct iwl_bt_coex_ci_cmd_old cmd = {};
-       u8 ci_bw_idx;
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       rcu_read_lock();
-       ieee80211_iterate_active_interfaces_atomic(
-                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                       iwl_mvm_bt_notif_iterator, &data);
-
-       if (data.primary) {
-               struct ieee80211_chanctx_conf *chan = data.primary;
-
-               if (WARN_ON(!chan->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_primary = 0;
-               } else {
-                       cmd.co_run_bw_primary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_primary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
-       }
-
-       if (data.secondary) {
-               struct ieee80211_chanctx_conf *chan = data.secondary;
-
-               if (WARN_ON(!data.secondary->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_secondary = 0;
-               } else {
-                       cmd.co_run_bw_secondary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_secondary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
-       }
-
-       rcu_read_unlock();
-
-       /* Don't spam the fw with the same command over and over */
-       if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
-               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
-                                        sizeof(cmd), &cmd))
-                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
-               memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
-       }
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
-
-       IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
-                      notif->bt_status ? "ON" : "OFF");
-       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
-       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-                      le32_to_cpu(notif->primary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-                      le32_to_cpu(notif->secondary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-                      le32_to_cpu(notif->bt_activity_grading));
-       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
-                      notif->bt_agg_traffic_load);
-
-       /* remember this notification for future use: rssi fluctuations */
-       memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
-
-       iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-                                    struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       /* If channel context is invalid or not on 2.4GHz - don't count it */
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return;
-       }
-       rcu_read_unlock();
-
-       if (vif->type != NL80211_IFTYPE_STATION ||
-           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has been removed right now */
-       if (IS_ERR_OR_NULL(sta))
-               return;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event_data rssi_event)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-       };
-       int ret;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       /*
-        * Rssi update while not associated - can happen since the statistics
-        * are handled asynchronously
-        */
-       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       /* No BT - reports should be disabled */
-       if (!mvm->last_bt_notif_old.bt_status)
-               return;
-
-       IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
-                      rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
-       /*
-        * Check if rssi is good enough for reduced Tx power, but not in loose
-        * scheme.
-        */
-       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
-           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                                 false);
-       else
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
-       if (ret)
-               IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bt_rssi_iterator, &data);
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
-
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-                                   struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       enum iwl_bt_coex_lut_type lut_type;
-
-       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       if (mvm->last_bt_notif_old.ttc_enabled)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
-       if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       /* tight coex, high bt traffic, reduce AGG time limit */
-       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-                                        struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       enum iwl_bt_coex_lut_type lut_type;
-
-       if (mvm->last_bt_notif_old.ttc_enabled)
-               return true;
-
-       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return true;
-
-       /*
-        * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
-        * since BT is already killed.
-        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
-        * we Tx.
-        * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
-        */
-       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-       return lut_type != BT_COEX_LOOSE_LUT;
-}
-
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
-{
-       u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-       return ag < BT_HIGH_TRAFFIC;
-}
-
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-                                       enum ieee80211_band band)
-{
-       u32 bt_activity =
-               le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-
-       if (band != IEEE80211_BAND_2GHZ)
-               return false;
-
-       return bt_activity >= BT_LOW_TRAFFIC;
-}
-
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
-{
-       iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                      struct iwl_rx_cmd_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 ant_isolation = le32_to_cpup((void *)pkt->data);
-       u8 __maybe_unused lower_bound, upper_bound;
-       u8 lut;
-
-       struct iwl_bt_coex_cmd_old *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-
-       if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* Ignore updates if we are in force mode */
-       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return;
-
-       if (ant_isolation ==  mvm->last_ant_isol)
-               return;
-
-       for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
-               if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
-                       break;
-
-       lower_bound = antenna_coupling_ranges[lut].range;
-
-       if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
-               upper_bound = antenna_coupling_ranges[lut + 1].range;
-       else
-               upper_bound = antenna_coupling_ranges[lut].range;
-
-       IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
-                      ant_isolation, lower_bound, upper_bound, lut);
-
-       mvm->last_ant_isol = ant_isolation;
-
-       if (mvm->last_corun_lut == lut)
-               return;
-
-       mvm->last_corun_lut = lut;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return;
-       cmd.data[0] = bt_cmd;
-
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_CORUN_LUT_20 |
-                                            BT_VALID_CORUN_LUT_40);
-
-       /* For the moment, use the same LUT for 20GHz and 40GHz */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
-
-       if (iwl_mvm_send_cmd(mvm, &cmd))
-               IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
-
-       kfree(bt_cmd);
-}
index 4b560e4..b96b1c6 100644 (file)
@@ -75,7 +75,6 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT       (2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT       (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE           0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE       0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
index c1a3131..e3561bb 100644 (file)
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
        }
 
-       ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+       ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
        if (ret)
                return ret;
        rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
index 1400445..3a279d3 100644 (file)
@@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
+static const char * const chanwidths[] = {
+       [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+       [NL80211_CHAN_WIDTH_20] = "ht20",
+       [NL80211_CHAN_WIDTH_40] = "ht40",
+       [NL80211_CHAN_WIDTH_80] = "vht80",
+       [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+       [NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
+                                  struct iwl_rx_packet *pkt, void *data)
+{
+       struct ieee80211_vif *vif = data;
+       struct iwl_mvm *mvm =
+               container_of(notif_wait, struct iwl_mvm, notif_wait);
+       struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
+       u32 num_of_stations = le32_to_cpu(report->number_of_stations);
+       int i;
+
+       IWL_INFO(mvm, "LQM report:\n");
+       IWL_INFO(mvm, "\tstatus: %d\n", report->status);
+       IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
+       IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
+                le32_to_cpu(report->tx_frame_dropped));
+       IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
+                le32_to_cpu(report->time_in_measurement_window));
+       IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
+                le32_to_cpu(report->total_air_time_other_stations));
+       IWL_INFO(mvm, "\tchannel_freq: %d\n",
+                vif->bss_conf.chandef.center_freq1);
+       IWL_INFO(mvm, "\tchannel_width: %s\n",
+                chanwidths[vif->bss_conf.chandef.width]);
+       IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
+       for (i = 0; i < num_of_stations; i++)
+               IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
+                        report->frequent_stations_air_time[i]);
+
+       return true;
+}
+
+static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
+                                           char *buf, size_t count,
+                                           loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct iwl_notification_wait wait_lqm_notif;
+       static u16 lqm_notif[] = {
+               WIDE_ID(MAC_CONF_GROUP,
+                       LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
+       };
+       int err;
+       u32 duration;
+       u32 timeout;
+
+       if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
+               return -EINVAL;
+
+       iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
+                                  lqm_notif, ARRAY_SIZE(lqm_notif),
+                                  iwl_mvm_lqm_notif_wait, vif);
+       mutex_lock(&mvm->mutex);
+       err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
+                                  duration, timeout);
+       mutex_unlock(&mvm->mutex);
+
+       if (err) {
+               IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
+               iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
+               return err;
+       }
+
+       /* wait for 2 * timeout (safety guard) and convert to jiffies */
+       timeout = msecs_to_jiffies((timeout * 2) / 1000);
+
+       err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
+                                   timeout);
+       if (err)
+               IWL_ERR(mvm, "Getting lqm notif timed out\n");
+
+       return count;
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index a43b392..362a546 100644 (file)
@@ -65,6 +65,7 @@
  *****************************************************************************/
 #include <linux/vmalloc.h>
 #include <linux/ieee80211.h>
+#include <linux/netdevice.h>
 
 #include "mvm.h"
 #include "fw-dbg.h"
@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
        return pos;
 }
 
-static
-int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
-                              char *buf, int pos, int bufsz)
-{
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
-       BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
-       BT_MBOX_PRINT(0, LE_PROF1, false);
-       BT_MBOX_PRINT(0, LE_PROF2, false);
-       BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
-       BT_MBOX_PRINT(0, CHL_SEQ_N, false);
-       BT_MBOX_PRINT(0, INBAND_S, false);
-       BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
-       BT_MBOX_PRINT(0, LE_SCAN, false);
-       BT_MBOX_PRINT(0, LE_ADV, false);
-       BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
-       BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
-       BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
-       BT_MBOX_PRINT(1, IP_SR, false);
-       BT_MBOX_PRINT(1, LE_MSTR, false);
-       BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
-       BT_MBOX_PRINT(1, MSG_TYPE, false);
-       BT_MBOX_PRINT(1, SSN, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
-       BT_MBOX_PRINT(2, SNIFF_ACT, false);
-       BT_MBOX_PRINT(2, PAG, false);
-       BT_MBOX_PRINT(2, INQUIRY, false);
-       BT_MBOX_PRINT(2, CONN, false);
-       BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
-       BT_MBOX_PRINT(2, DISC, false);
-       BT_MBOX_PRINT(2, SCO_TX_ACT, false);
-       BT_MBOX_PRINT(2, SCO_RX_ACT, false);
-       BT_MBOX_PRINT(2, ESCO_RE_TX, false);
-       BT_MBOX_PRINT(2, SCO_DURATION, true);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
-       BT_MBOX_PRINT(3, SCO_STATE, false);
-       BT_MBOX_PRINT(3, SNIFF_STATE, false);
-       BT_MBOX_PRINT(3, A2DP_STATE, false);
-       BT_MBOX_PRINT(3, ACL_STATE, false);
-       BT_MBOX_PRINT(3, MSTR_STATE, false);
-       BT_MBOX_PRINT(3, OBX_STATE, false);
-       BT_MBOX_PRINT(3, OPEN_CON_2, false);
-       BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
-       BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
-       BT_MBOX_PRINT(3, INBAND_P, false);
-       BT_MBOX_PRINT(3, MSG_TYPE_2, false);
-       BT_MBOX_PRINT(3, SSN_2, false);
-       BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
-       return pos;
-}
-
 static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
        char *buf;
        int ret, pos = 0, bufsz = sizeof(char) * 1024;
 
@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               struct iwl_bt_coex_profile_notif_old *notif =
-                       &mvm->last_bt_notif_old;
-
-               pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
-
-               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                                notif->bt_ci_compliance);
-               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-                                le32_to_cpu(notif->primary_ch_lut));
-               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-                                le32_to_cpu(notif->secondary_ch_lut));
-               pos += scnprintf(buf+pos,
-                                bufsz-pos, "bt_activity_grading = %d\n",
-                                le32_to_cpu(notif->bt_activity_grading));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "antenna isolation = %d CORUN LUT index = %d\n",
-                                mvm->last_ant_isol, mvm->last_corun_lut);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-                                notif->rrc_enabled);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-                                notif->ttc_enabled);
-       } else {
-               struct iwl_bt_coex_profile_notif *notif =
-                       &mvm->last_bt_notif;
-
-               pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
-               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                                notif->bt_ci_compliance);
-               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-                                le32_to_cpu(notif->primary_ch_lut));
-               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-                                le32_to_cpu(notif->secondary_ch_lut));
-               pos += scnprintf(buf+pos,
-                                bufsz-pos, "bt_activity_grading = %d\n",
-                                le32_to_cpu(notif->bt_activity_grading));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "antenna isolation = %d CORUN LUT index = %d\n",
-                                mvm->last_ant_isol, mvm->last_corun_lut);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-                                (notif->ttc_rrc_status >> 4) & 0xF);
-               pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-                                notif->ttc_rrc_status & 0xF);
-       }
+       pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+                        notif->bt_ci_compliance);
+       pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+                        le32_to_cpu(notif->primary_ch_lut));
+       pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+                        le32_to_cpu(notif->secondary_ch_lut));
+       pos += scnprintf(buf + pos,
+                        bufsz - pos, "bt_activity_grading = %d\n",
+                        le32_to_cpu(notif->bt_activity_grading));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "antenna isolation = %d CORUN LUT index = %d\n",
+                        mvm->last_ant_isol, mvm->last_corun_lut);
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+                        (notif->ttc_rrc_status >> 4) & 0xF);
+       pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+                        notif->ttc_rrc_status & 0xF);
 
        pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
                         IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
        char buf[256];
        int bufsz = sizeof(buf);
        int pos = 0;
 
        mutex_lock(&mvm->mutex);
 
-       if (!fw_has_api(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-               struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "Channel inhibition CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tPrimary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_primary_ci));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tSecondary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_secondary_ci));
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "BT Configuration CMD - 0=default, 1=never, 2=always\n");
-               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
-                                mvm->bt_ack_kill_msk[0]);
-               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
-                                mvm->bt_cts_kill_msk[0]);
-
-       } else {
-               struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
-
-               pos += scnprintf(buf+pos, bufsz-pos,
-                                "Channel inhibition CMD\n");
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tPrimary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_primary_ci));
-               pos += scnprintf(buf+pos, bufsz-pos,
-                              "\tSecondary Channel Bitmap 0x%016llx\n",
-                              le64_to_cpu(cmd->bt_secondary_ci));
-       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "\tPrimary Channel Bitmap 0x%016llx\n",
+                        le64_to_cpu(cmd->bt_primary_ci));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "\tSecondary Channel Bitmap 0x%016llx\n",
+                        le64_to_cpu(cmd->bt_secondary_ci));
 
        mutex_unlock(&mvm->mutex);
 
@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };
        int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
        memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
               ARRAY_SIZE(cmd.indirection_table) % nbytes);
 
-       memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+       netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
        mutex_lock(&mvm->mutex);
        ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
index 7a16e55..4c086d0 100644 (file)
@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
        IWL_RX_MPDU_AMSDU_LAST_SUBFRAME         = 0x80,
 };
 
+enum iwl_rx_l3_proto_values {
+       IWL_RX_L3_TYPE_NONE,
+       IWL_RX_L3_TYPE_IPV4,
+       IWL_RX_L3_TYPE_IPV4_FRAG,
+       IWL_RX_L3_TYPE_IPV6_FRAG,
+       IWL_RX_L3_TYPE_IPV6,
+       IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+       IWL_RX_L3_TYPE_ARP,
+       IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
 enum iwl_rx_l3l4_flags {
        IWL_RX_L3L4_IP_HDR_CSUM_OK              = BIT(0),
        IWL_RX_L3L4_TCP_UDP_CSUM_OK             = BIT(1),
        IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH         = BIT(2),
        IWL_RX_L3L4_TCP_ACK                     = BIT(3),
-       IWL_RX_L3L4_L3_PROTO_MASK               = 0xf << 4,
+       IWL_RX_L3L4_L3_PROTO_MASK               = 0xf << IWL_RX_L3_PROTO_POS,
        IWL_RX_L3L4_L4_PROTO_MASK               = 0xf << 8,
        IWL_RX_L3L4_RSS_HASH_MASK               = 0xf << 12,
 };
index ba3f0bb..dadcccd 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
 #define IWL_BAR_DFAULT_RETRY_LIMIT             60
 #define IWL_LOW_RETRY_LIMIT                    7
 
+/**
+ * enum iwl_tx_offload_assist_flags_pos -  set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words)
+ *     from mac header end. For normal case it is 4 words for SNAP.
+ *     note: tx_cmd, mac header and pad are not counted in the offset.
+ *     This is used to help the offload in case there is tunneling such as
+ *     IPv6 in IPv4, in such case the ip header offset should point to the
+ *     inner ip header and IPv4 checksum of the external header should be
+ *     calculated by driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ *     field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ *     alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+       TX_CMD_OFFLD_IP_HDR =           0,
+       TX_CMD_OFFLD_L4_EN =            6,
+       TX_CMD_OFFLD_L3_EN =            7,
+       TX_CMD_OFFLD_MH_SIZE =          8,
+       TX_CMD_OFFLD_PAD =              13,
+       TX_CMD_OFFLD_AMSDU =            14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK       0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK   0x3f
+
 /* TODO: complete documentation for try_cnt and btkill_cnt */
 /**
  * struct iwl_tx_cmd - TX command struct to FW
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
  *     cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
  */
 struct iwl_tx_cmd {
        __le16 len;
-       __le16 next_frame_len;
+       __le16 offload_assist;
        __le32 tx_flags;
        struct {
                u8 try_cnt;
@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
        __le16 reserved4;
        u8 payload[0];
        struct ieee80211_hdr hdr[0];
-} __packed; /* TX_CMD_API_S_VER_3 */
+} __packed; /* TX_CMD_API_S_VER_6 */
 
 /*
  * TX response related data
index 4a0fc47..60eed84 100644 (file)
 #include "fw-api-stats.h"
 #include "fw-api-tof.h"
 
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
 enum {
        IWL_MVM_OFFCHANNEL_QUEUE = 8,
        IWL_MVM_CMD_QUEUE = 9,
 };
 
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ *     that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *     Each MGMT queue is mapped to a single STA
+ *     MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *     DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *     the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *     as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+       IWL_MVM_DQA_CMD_QUEUE = 0,
+       IWL_MVM_DQA_GCAST_QUEUE = 3,
+       IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+       IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+       IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+       IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+       IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
 enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BK = 0,
        IWL_MVM_TX_FIFO_BE,
@@ -279,6 +306,11 @@ enum {
 /* Please keep this enum *SORTED* by hex value.
  * Needed for binary search, otherwise a warning will be triggered.
  */
+enum iwl_mac_conf_subcmd_ids {
+       LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+       LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+};
+
 enum iwl_phy_ops_subcmd_ids {
        CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
        CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +319,10 @@ enum iwl_phy_ops_subcmd_ids {
        DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
+enum iwl_system_subcmd_ids {
+       SHARED_MEM_CFG_CMD = 0x0,
+};
+
 enum iwl_data_path_subcmd_ids {
        UPDATE_MU_GROUPS_CMD = 0x1,
        TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +338,8 @@ enum iwl_prot_offload_subcmd_ids {
 enum {
        LEGACY_GROUP = 0x0,
        LONG_GROUP = 0x1,
+       SYSTEM_GROUP = 0x2,
+       MAC_CONF_GROUP = 0x3,
        PHY_OPS_GROUP = 0x4,
        DATA_PATH_GROUP = 0x5,
        PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1961,7 @@ struct iwl_tdls_config_res {
 
 #define TX_FIFO_MAX_NUM                8
 #define RX_FIFO_MAX_NUM                2
+#define TX_FIFO_INTERNAL_MAX_NUM       6
 
 /**
  * Shared memory configuration information from the FW
@@ -1940,6 +1979,12 @@ struct iwl_tdls_config_res {
  * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
  *     when paging is not supported this should be 0
  * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ *      set, the last 3 members don't exist.
  */
 struct iwl_shared_mem_cfg {
        __le32 shared_mem_addr;
@@ -1951,7 +1996,10 @@ struct iwl_shared_mem_cfg {
        __le32 rxfifo_size[RX_FIFO_MAX_NUM];
        __le32 page_buff_addr;
        __le32 page_buff_size;
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+       __le32 rxfifo_addr;
+       __le32 internal_txfifo_addr;
+       __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
 
 /**
  * VHT MU-MIMO group configuration
@@ -2002,4 +2050,60 @@ struct iwl_stored_beacon_notif {
        u8 data[MAX_STORED_BEACON_SIZE];
 } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
 
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+       LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+       LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+       LQM_STATUS_SUCCESS = 0,
+       LQM_STATUS_TIMEOUT = 1,
+       LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop)
+ *     as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+       __le32 cmd_operation;
+       __le32 mac_id;
+       __le32 measurement_time;
+       __le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
+
+/**
+ * Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ *     (in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ *     (a number between 0 to 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ *     stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ *     took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ *     measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status. may be one of the LQM_STATUS_* defined above.
+ * @reserved: reserved.
+ */
+struct iwl_link_qual_msrmnt_notif {
+       __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
+       __le32 number_of_stations;
+       __le32 total_air_time_other_stations;
+       __le32 time_in_measurement_window;
+       __le32 tx_frame_dropped;
+       __le32 mac_id;
+       __le32 status;
+       __le32 reserved[3];
+} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+
 #endif /* __fw_api_h__ */
index 4856eac..cbb5947 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
                *dump_data = iwl_fw_error_next_data(*dump_data);
        }
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+               /* Pull UMAC internal TXF data from all TXFs */
+               for (i = 0;
+                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i++) {
+                       /* Mark the number of TXF we're pulling now */
+                       iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+
+                       fifo_hdr = (void *)(*dump_data)->data;
+                       fifo_data = (void *)fifo_hdr->data;
+                       fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+
+                       /* No need to try to read the data if the length is 0 */
+                       if (fifo_len == 0)
+                               continue;
+
+                       /* Add a TLV for the internal FIFOs */
+                       (*dump_data)->type =
+                               cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+                       (*dump_data)->len =
+                               cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+                       fifo_hdr->fifo_num = cpu_to_le32(i);
+                       fifo_hdr->available_bytes =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_FIFO_ITEM_CNT));
+                       fifo_hdr->wr_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_WR_PTR));
+                       fifo_hdr->rd_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_RD_PTR));
+                       fifo_hdr->fence_ptr =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_FENCE_PTR));
+                       fifo_hdr->fence_mode =
+                               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                               TXF_CPU2_LOCK_FENCE));
+
+                       /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+                       iwl_trans_write_prph(mvm->trans,
+                                            TXF_CPU2_READ_MODIFY_ADDR,
+                                            TXF_CPU2_WR_PTR);
+
+                       /* Dummy-read to advance the read pointer to head */
+                       iwl_trans_read_prph(mvm->trans,
+                                           TXF_CPU2_READ_MODIFY_DATA);
+
+                       /* Read FIFO */
+                       fifo_len /= sizeof(u32); /* Size in DWORDS */
+                       for (j = 0; j < fifo_len; j++)
+                               fifo_data[j] =
+                                       iwl_trans_read_prph(mvm->trans,
+                                                           TXF_CPU2_READ_MODIFY_DATA);
+                       *dump_data = iwl_fw_error_next_data(*dump_data);
+               }
+       }
+
        iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
@@ -429,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        struct iwl_fw_error_dump_trigger_desc *dump_trig;
        struct iwl_mvm_dump_ptrs *fw_error_dump;
        u32 sram_len, sram_ofs;
+       struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
+               mvm->fw->dbg_mem_tlv;
        u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-       u32 smem_len = mvm->cfg->smem_len;
-       u32 sram2_len = mvm->cfg->dccm2_len;
+       u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
+       u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
        bool monitor_dump_only = false;
        int i;
 
@@ -494,6 +555,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         sizeof(struct iwl_fw_error_dump_fifo);
                }
 
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+                       for (i = 0;
+                            i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+                            i++) {
+                               if (!mem_cfg->internal_txfifo_size[i])
+                                       continue;
+
+                               /* Add header info */
+                               fifo_data_len +=
+                                       mem_cfg->internal_txfifo_size[i] +
+                                       sizeof(*dump_data) +
+                                       sizeof(struct iwl_fw_error_dump_fifo);
+                       }
+               }
+
                /* Make room for PRPH registers */
                for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
                        /* The range includes both boundaries */
@@ -511,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        file_len = sizeof(*dump_file) +
                   sizeof(*dump_data) * 2 +
-                  sram_len + sizeof(*dump_mem) +
                   fifo_data_len +
                   prph_len +
                   radio_len +
@@ -525,6 +601,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (sram2_len)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
+       /* Make room for MEM segments */
+       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+               if (fw_dbg_mem[i])
+                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+                               le32_to_cpu(fw_dbg_mem[i]->len);
+       }
+
        /* Make room for fw's virtual image pages, if it exists */
        if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
                file_len += mvm->num_of_paging_blk *
@@ -550,6 +633,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
+       if (!mvm->fw->dbg_dynamic_mem)
+               file_len += sram_len + sizeof(*dump_mem);
+
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
@@ -599,16 +685,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (monitor_dump_only)
                goto dump_trans_data;
 
-       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-       dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
-       dump_mem = (void *)dump_data->data;
-       dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
-       dump_mem->offset = cpu_to_le32(sram_ofs);
-       iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
-                                sram_len);
+       if (!mvm->fw->dbg_dynamic_mem) {
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+               dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+               dump_mem = (void *)dump_data->data;
+               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+               dump_mem->offset = cpu_to_le32(sram_ofs);
+               iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+                                        sram_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+               if (fw_dbg_mem[i]) {
+                       u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
+                       u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
+
+                       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+                       dump_data->len = cpu_to_le32(len +
+                                       sizeof(*dump_mem));
+                       dump_mem = (void *)dump_data->data;
+                       dump_mem->type = fw_dbg_mem[i]->data_type;
+                       dump_mem->offset = cpu_to_le32(ofs);
+                       iwl_trans_read_mem_bytes(mvm->trans, ofs,
+                                                dump_mem->data,
+                                                len);
+                       dump_data = iwl_fw_error_next_data(dump_data);
+               }
+       }
 
        if (smem_len) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
                dump_mem = (void *)dump_data->data;
@@ -616,10 +722,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
                iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
                                         dump_mem->data, smem_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        if (sram2_len) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
                dump_mem = (void *)dump_data->data;
@@ -627,11 +733,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
                iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
                                         dump_mem->data, sram2_len);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
            CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
-               dump_data = iwl_fw_error_next_data(dump_data);
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
                                             sizeof(*dump_mem));
@@ -640,6 +746,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
                iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
                                         dump_mem->data, IWL8260_ICCM_LEN);
+               dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        /* Dump fw's virtual image */
@@ -649,7 +756,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                        struct page *pages =
                                mvm->fw_paging_db[i].fw_paging_block;
 
-                       dump_data = iwl_fw_error_next_data(dump_data);
                        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
                        dump_data->len = cpu_to_le32(sizeof(*paging) +
                                                     PAGING_BLOCK_SIZE);
@@ -657,10 +763,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                        paging->index = cpu_to_le32(i);
                        memcpy(paging->data, page_address(pages),
                               PAGING_BLOCK_SIZE);
+                       dump_data = iwl_fw_error_next_data(dump_data);
                }
        }
 
-       dump_data = iwl_fw_error_next_data(dump_data);
        if (prph_len)
                iwl_dump_prph(mvm->trans, &dump_data);
 
index 594cd0d..6ad5c60 100644 (file)
@@ -64,6 +64,7 @@
  *
  *****************************************************************************/
 #include <net/mac80211.h>
+#include <linux/netdevice.h>
 
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };
 
+       /* Do not direct RSS traffic to Q 0 which is our fallback queue */
        for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
-               cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
-       memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+               cmd.indirection_table[i] =
+                       1 + (i % (mvm->trans->num_rx_queues - 1));
+       netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
        return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }
@@ -174,8 +179,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
                }
        }
 
-       if (sec_idx >= IWL_UCODE_SECTION_MAX) {
-               IWL_ERR(mvm, "driver didn't find paging image\n");
+       /*
+        * If paging is enabled there should be at least 2 more sections left
+        * (one for CSS and one for Paging data)
+        */
+       if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+               IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }
@@ -410,7 +419,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
                goto exit;
        }
 
-       mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+       /* Add an extra page for headers */
+       mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
+                                                 FW_PAGING_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
@@ -641,7 +652,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
         */
 
        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-       mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+       if (iwl_mvm_is_dqa_supported(mvm))
+               mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+       else
+               mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -788,17 +802,22 @@ out:
 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 {
        struct iwl_host_cmd cmd = {
-               .id = SHARED_MEM_CFG,
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
-       struct iwl_rx_packet *pkt;
        struct iwl_shared_mem_cfg *mem_cfg;
+       struct iwl_rx_packet *pkt;
        u32 i;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+               cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+       else
+               cmd.id = SHARED_MEM_CFG;
+
        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;
 
@@ -824,6 +843,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                le32_to_cpu(mem_cfg->page_buff_addr);
        mvm->shared_mem_cfg.page_buff_size =
                le32_to_cpu(mem_cfg->page_buff_size);
+
+       /* new API has more data */
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+               mvm->shared_mem_cfg.rxfifo_addr =
+                       le32_to_cpu(mem_cfg->rxfifo_addr);
+               mvm->shared_mem_cfg.internal_txfifo_addr =
+                       le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+               BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+                            sizeof(mem_cfg->internal_txfifo_size));
+
+               for (i = 0;
+                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i++)
+                       mvm->shared_mem_cfg.internal_txfifo_size[i] =
+                               le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+       }
+
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
        iwl_free_resp(&cmd);
index e885db3..5f95056 100644 (file)
@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                .exclude_vif = exclude_vif,
                .used_hw_queues =
                        BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-                       BIT(mvm->aux_queue) |
-                       BIT(IWL_MVM_CMD_QUEUE),
+                       BIT(mvm->aux_queue),
        };
 
+       if (iwl_mvm_is_dqa_supported(mvm))
+               data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+       else
+               data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
        lockdep_assert_held(&mvm->mutex);
 
        /* mark all VIF used hw queues */
@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                return 0;
        }
 
-       /* Find available queues, and allocate them to the ACs */
+       /*
+        * Find available queues, and allocate them to the ACs. When in
+        * DQA-mode they aren't really used, and this is done only so the
+        * mac80211 ieee80211_check_queues() function won't fail
+        */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);
 
-               if (queue >= mvm->first_agg_queue) {
+               if (!iwl_mvm_is_dqa_supported(mvm) &&
+                   queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate queue\n");
                        ret = -EIO;
                        goto exit_fail;
@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 
        /* Allocate the CAB queue for softAP and GO interfaces */
        if (vif->type == NL80211_IFTYPE_AP) {
-               u8 queue = find_first_zero_bit(&used_hw_queues,
-                                              mvm->first_agg_queue);
+               u8 queue;
 
-               if (queue >= mvm->first_agg_queue) {
-                       IWL_ERR(mvm, "Failed to allocate cab queue\n");
-                       ret = -EIO;
-                       goto exit_fail;
+               if (!iwl_mvm_is_dqa_supported(mvm)) {
+                       queue = find_first_zero_bit(&used_hw_queues,
+                                                   mvm->first_agg_queue);
+
+                       if (queue >= mvm->first_agg_queue) {
+                               IWL_ERR(mvm, "Failed to allocate cab queue\n");
+                               ret = -EIO;
+                               goto exit_fail;
+                       }
+               } else {
+                       queue = IWL_MVM_DQA_GCAST_QUEUE;
                }
 
                vif->cab_queue = queue;
@@ -495,6 +510,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
                /* fall through */
        default:
+               /* If DQA is supported - queues will be enabled when needed */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       break;
+
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
                                              vif->hw_queue[ac],
@@ -523,6 +542,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                    IWL_MAX_TID_COUNT, 0);
                /* fall through */
        default:
+               /*
+                * If DQA is supported - queues were already disabled, since in
+                * DQA-mode the queues are a property of the STA and not of the
+                * vif, and at this point the STA was already deleted
+                */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       break;
+
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
                                            vif->hw_queue[ac],
index 76e649c..4f5ec49 100644 (file)
@@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
        hw->netdev_features |= mvm->cfg->features;
-       if (!iwl_mvm_is_csum_supported(mvm))
-               hw->netdev_features &= ~NETIF_F_RXCSUM;
-
-       if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
-               hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                       NETIF_F_TSO | NETIF_F_TSO6;
+       if (!iwl_mvm_is_csum_supported(mvm)) {
+               hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+                                        NETIF_F_RXCSUM);
+               /* We may support SW TX CSUM */
+               if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+                       hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+       }
 
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
@@ -992,6 +993,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
        memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+       memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
        memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1178,6 +1180,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 
        flush_work(&mvm->d0i3_exit_work);
        flush_work(&mvm->async_handlers_wk);
+       flush_work(&mvm->add_stream_wk);
        cancel_delayed_work_sync(&mvm->fw_dump_wk);
        iwl_mvm_free_fw_dump_desc(mvm);
 
@@ -1821,6 +1824,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
                iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
 
+       if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
+           mvmvif->lqm_active)
+               iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
+                                    0, 0);
+
        /*
         * If we're not associated yet, take the (new) BSSID before associating
         * so the firmware knows. If we're already associated, then use the old
@@ -2340,7 +2348,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return;
        }
 
-       if (iwlwifi_mod_params.uapsd_disable) {
+       if (!vif->p2p &&
+           (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
                vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
                return;
        }
@@ -2376,6 +2385,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
                                    peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+                                            struct iwl_mvm_sta *mvm_sta)
+{
+       struct iwl_mvm_tid_data *tid_data;
+       struct sk_buff *skb;
+       int i;
+
+       spin_lock_bh(&mvm_sta->lock);
+       for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+               tid_data = &mvm_sta->tid_data[i];
+               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+                       ieee80211_free_txskb(mvm->hw, skb);
+       }
+       spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
@@ -2396,6 +2421,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        /* if a STA is being removed, reuse its ID */
        flush_work(&mvm->sta_drained_wk);
 
+       /*
+        * If we are in a STA removal flow and in DQA mode:
+        *
+        * This is after the sync_rcu part, so the queues have already been
+        * flushed. No more TXs on their way in mac80211's path, and no more in
+        * the queues.
+        * Also, we won't be getting any new TX frames for this station.
+        * What we might have are deferred TX frames that need to be taken care
+        * of.
+        *
+        * Drop any still-queued deferred-frame before removing the STA, and
+        * make sure the worker is no longer handling frames for this STA.
+        */
+       if (old_state == IEEE80211_STA_NONE &&
+           new_state == IEEE80211_STA_NOTEXIST &&
+           iwl_mvm_is_dqa_supported(mvm)) {
+               struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+               iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+               flush_work(&mvm->add_stream_wk);
+
+               /*
+                * No need to make sure deferred TX indication is off since the
+                * worker will already remove it if it was on
+                */
+       }
+
        mutex_lock(&mvm->mutex);
        if (old_state == IEEE80211_STA_NOTEXIST &&
            new_state == IEEE80211_STA_NONE) {
@@ -3628,6 +3680,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 
                break;
        case NL80211_IFTYPE_STATION:
+               if (mvmvif->lqm_active)
+                       iwl_mvm_send_lqm_cmd(vif,
+                                            LQM_CMD_OPERATION_STOP_MEASUREMENT,
+                                            0, 0);
+
                /* Schedule the time event to a bit before beacon 1,
                 * to make sure we're in the new channel when the
                 * GO/AP arrives.
@@ -3727,6 +3784,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        if (!vif || vif->type != NL80211_IFTYPE_STATION)
                return;
 
+       /* Make sure we're done with the deferred traffic before flushing */
+       if (iwl_mvm_is_dqa_supported(mvm))
+               flush_work(&mvm->add_stream_wk);
+
        mutex_lock(&mvm->mutex);
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
index 9abbc93..2d685e0 100644 (file)
@@ -208,7 +208,7 @@ enum iwl_power_scheme {
 };
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL   10
-#define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+#define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 enum iwl_dbgfs_pm_mask {
@@ -453,6 +453,12 @@ struct iwl_mvm_vif {
 
        /* TCP Checksum Offload */
        netdev_features_t features;
+
+       /*
+        * link quality measurement - used to check whether this interface
+        * is in the middle of a link quality measurement
+        */
+       bool lqm_active;
 };
 
 static inline struct iwl_mvm_vif *
@@ -602,6 +608,9 @@ struct iwl_mvm_shared_mem_cfg {
        u32 rxfifo_size[RX_FIFO_MAX_NUM];
        u32 page_buff_addr;
        u32 page_buff_size;
+       u32 rxfifo_addr;
+       u32 internal_txfifo_addr;
+       u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
 
 struct iwl_mvm {
@@ -656,10 +665,17 @@ struct iwl_mvm {
                /* Map to HW queue */
                u32 hw_queue_to_mac80211;
                u8 hw_queue_refcount;
+               u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+               /*
+                * This is to mark that queue is reserved for a STA but not yet
+                * allocated. This is needed to make sure we have at least one
+                * available queue to use when adding a new STA
+                */
                bool setup_reserved;
                u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
        } queue_info[IWL_MAX_HW_QUEUES];
        spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+       struct work_struct add_stream_wk; /* To add streams to queues */
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
        const char *nvm_file_name;
@@ -679,11 +695,11 @@ struct iwl_mvm {
        struct iwl_rx_phy_info last_phy_info;
        struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
        struct work_struct sta_drained_wk;
+       unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
        unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
        atomic_t pending_frames[IWL_MVM_STATION_COUNT];
        u32 tfd_drained[IWL_MVM_STATION_COUNT];
        u8 rx_ba_sessions;
-       u32 secret_key[IWL_RSS_HASH_KEY_CNT];
 
        /* configured by mac80211 */
        u32 rts_threshold;
@@ -694,6 +710,7 @@ struct iwl_mvm {
        struct iwl_mcast_filter_cmd *mcast_filter_cmd;
        enum iwl_mvm_scan_type scan_type;
        enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+       struct timer_list scan_timer;
 
        /* max number of simultaneous scans the FW supports */
        unsigned int max_scans;
@@ -1063,7 +1080,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
 {
        return fw_has_capa(&mvm->fw->ucode_capa,
                           IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
-               IWL_MVM_P2P_UAPSD_STANDALONE;
+               !(iwlwifi_mod_params.uapsd_disable &
+                 IWL_DISABLE_UAPSD_P2P_CLIENT);
 }
 
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1297,6 +1315,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);
 
 /* Scheduled scan */
 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1453,22 +1472,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-                                   struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-                                        struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-                                       enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                      struct iwl_rx_cmd_buffer *rxb);
-
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1634,4 +1637,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             const char *errmsg);
 
+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+                        enum iwl_lqm_cmd_operatrions operation,
+                        u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
 #endif /* __IWL_MVM_H__ */
index 5e8ab79..656541c 100644 (file)
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
                   RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
                       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
 
@@ -418,6 +418,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(REPLY_DEBUG_CMD),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+       HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+       HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+       HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+       [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+       [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
        [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
        [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
@@ -562,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
        INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
        INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+       INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 
        spin_lock_init(&mvm->d0i3_tx_lock);
        spin_lock_init(&mvm->refs_lock);
@@ -601,7 +619,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.command_groups = iwl_mvm_groups;
        trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 
-       trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+       if (iwl_mvm_is_dqa_supported(mvm))
+               trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+       else
+               trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
        trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
        trans_cfg.scd_set_active = true;
 
@@ -707,8 +728,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        iwl_mvm_tof_init(mvm);
 
-       /* init RSS hash key */
-       get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+       setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+                   (unsigned long)mvm);
 
        return op_mode;
 
@@ -765,6 +786,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 
        iwl_mvm_tof_clean(mvm);
 
+       del_timer_sync(&mvm->scan_timer);
+
+       mutex_destroy(&mvm->mutex);
+       mutex_destroy(&mvm->d0i3_suspend_mutex);
+
        ieee80211_free_hw(mvm->hw);
 }
 
index f313910..7b1f6ad 100644 (file)
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                        cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
        }
 
-       cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+       cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
 
        if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
            cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
index 9a54f2d..b2bc3d9 100644 (file)
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       u16 flags = le16_to_cpu(desc->l3l4_flags);
+       u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+                         IWL_RX_L3_PROTO_POS);
 
        if (mvmvif->features & NETIF_F_RXCSUM &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
-           desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+           flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+           (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+            l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
index 09eb72c..c1d1be9 100644 (file)
@@ -70,6 +70,7 @@
 
 #include "mvm.h"
 #include "fw-api-scan.h"
+#include "iwl-io.h"
 
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
                ieee80211_scan_completed(mvm->hw,
                                scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
+       } else {
+               IWL_ERR(mvm,
+                       "got scan complete notification but no scan is running\n");
        }
 
        mvm->last_ebs_successful =
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
                                         SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+                                        SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
                                         SCAN_CONFIG_FLAG_SET_ALL_TIMES |
                                         SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
                                         SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
        return -EIO;
 }
 
+#define SCAN_TIMEOUT (16 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+       struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+       IWL_ERR(mvm, "regular scan timed out\n");
+
+       del_timer(&mvm->scan_timer);
+       iwl_force_nmi(mvm->trans);
+}
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct cfg80211_scan_request *req,
                           struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
        iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
+       mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
        return 0;
 }
 
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
                ieee80211_scan_completed(mvm->hw, aborted);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
        } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
                ieee80211_sched_scan_stopped(mvm->hw);
                mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ out:
                 * to release the scan reference here.
                 */
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               del_timer(&mvm->scan_timer);
                if (notify)
                        ieee80211_scan_completed(mvm->hw, true);
        } else if (notify) {
index c2def12..443a428 100644 (file)
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
                }
        }
 
-       if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+       if (sta) {
                BUILD_BUG_ON(sizeof(sf_full_timeout) !=
                             sizeof(__le32) * SF_NUM_SCENARIO *
                             SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
        struct ieee80211_sta *sta;
        int ret = 0;
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
-               sf_cmd.state = cpu_to_le32(new_state);
-
        if (mvm->cfg->disable_dummy_notification)
                sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
 
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 
        switch (new_state) {
        case SF_UNINIT:
-               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
-                       iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+               iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        case SF_FULL_ON:
                if (sta_id == IWL_MVM_STATION_COUNT) {
index ef99942..12614b7 100644 (file)
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
 
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                          bool update)
+                          bool update, unsigned int flags)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
 
-       if (!update) {
+       if (!update || (flags & STA_MODIFY_QUEUES)) {
                add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+               if (flags & STA_MODIFY_QUEUES)
+                       add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
        }
 
        switch (sta->bandwidth) {
@@ -274,6 +277,211 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+                                  struct ieee80211_sta *sta, u8 ac, int tid,
+                                  struct ieee80211_hdr *hdr)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = iwl_mvm_ac_to_tx_fifo[ac],
+               .sta_id = mvmsta->sta_id,
+               .tid = tid,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+       u8 mac_queue = mvmsta->vif->hw_queue[ac];
+       int queue = -1;
+       int ssn;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /*
+        * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+        * exists
+        */
+       if (!ieee80211_is_data_qos(hdr->frame_control) ||
+           ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+                                               IWL_MVM_DQA_MAX_MGMT_QUEUE);
+               if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+                       IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+                                           queue);
+
+               /* If no such queue is found, we'll use a DATA queue instead */
+       }
+
+       if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+               queue = mvmsta->reserved_queue;
+               IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+       }
+
+       if (queue < 0)
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
+       if (queue >= 0)
+               mvm->queue_info[queue].setup_reserved = false;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       /* TODO: support shared queues for same RA */
+       if (queue < 0)
+               return -ENOSPC;
+
+       /*
+        * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+        * but for configuring the SCD to send A-MPDUs we need to mark the queue
+        * as aggregatable.
+        * Mark all DATA queues as allowing to be aggregated at some point
+        */
+       cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+                        queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+                           queue, mvmsta->sta_id, tid);
+
+       ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+       iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+                          wdg_timeout);
+
+       spin_lock_bh(&mvmsta->lock);
+       mvmsta->tid_data[tid].txq_id = queue;
+       mvmsta->tfd_queue_msk |= BIT(queue);
+
+       if (mvmsta->reserved_queue == queue)
+               mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+       spin_unlock_bh(&mvmsta->lock);
+
+       return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+       if (tid == IWL_MAX_TID_COUNT)
+               return IEEE80211_AC_VO; /* MGMT */
+
+       return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+                                      struct ieee80211_sta *sta, int tid)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       struct sk_buff *skb;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff_head deferred_tx;
+       u8 mac_queue;
+       bool no_queue = false; /* Marks if there is a problem with the queue */
+       u8 ac;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       skb = skb_peek(&tid_data->deferred_tx_frames);
+       if (!skb)
+               return;
+       hdr = (void *)skb->data;
+
+       ac = iwl_mvm_tid_to_ac_queue(tid);
+       mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+       if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+           iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+               IWL_ERR(mvm,
+                       "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+                       mvmsta->sta_id, tid);
+
+               /*
+                * Mark queue as problematic so later the deferred traffic is
+                * freed, as we can do nothing with it
+                */
+               no_queue = true;
+       }
+
+       __skb_queue_head_init(&deferred_tx);
+
+       /* Disable bottom-halves when entering TX path */
+       local_bh_disable();
+       spin_lock(&mvmsta->lock);
+       skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+       spin_unlock(&mvmsta->lock);
+
+       while ((skb = __skb_dequeue(&deferred_tx)))
+               if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+                       ieee80211_free_txskb(mvm->hw, skb);
+       local_bh_enable();
+
+       /* Wake queue */
+       iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+                                          add_stream_wk);
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       unsigned long deferred_tid_traffic;
+       int sta_id, tid;
+
+       mutex_lock(&mvm->mutex);
+
+       /* Go over all stations with deferred traffic */
+       for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+                        IWL_MVM_STATION_COUNT) {
+               clear_bit(sta_id, mvm->sta_deferred_frames);
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta))
+                       continue;
+
+               mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+               for_each_set_bit(tid, &deferred_tid_traffic,
+                                IWL_MAX_TID_COUNT + 1)
+                       iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+       }
+
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+                                     struct ieee80211_sta *sta,
+                                     enum nl80211_iftype vif_type)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       int queue;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /* Make sure we have free resources for this STA */
+       if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
+               queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+       else
+               queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
+       if (queue < 0) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm, "No available queues for new station\n");
+               return -ENOSPC;
+       }
+       mvm->queue_info[queue].setup_reserved = true;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       mvmsta->reserved_queue = queue;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+                           queue, mvmsta->sta_id);
+
+       return 0;
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
@@ -314,18 +522,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                ret = iwl_mvm_tdls_sta_init(mvm, sta);
                if (ret)
                        return ret;
-       } else {
+       } else if (!iwl_mvm_is_dqa_supported(mvm)) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++)
                        if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                                mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
        }
 
        /* for HW restart - reset everything but the sequence number */
-       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+       for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;
+
+               if (!iwl_mvm_is_dqa_supported(mvm))
+                       continue;
+
+               /*
+                * Mark all queues for this STA as unallocated and defer TX
+                * frames until the queue is allocated
+                */
+               mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+               skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
        }
+       mvm_sta->deferred_traffic_tid_map = 0;
        mvm_sta->agg_tids = 0;
 
        if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +557,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                mvm_sta->dup_data = dup_data;
        }
 
-       ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+       if (iwl_mvm_is_dqa_supported(mvm)) {
+               ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+                                                ieee80211_vif_type_p2p(vif));
+               if (ret)
+                       goto err;
+       }
+
+       ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
        if (ret)
                goto err;
 
@@ -364,7 +590,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta)
 {
-       return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+       return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +735,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
        mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      struct iwl_mvm_sta *mvm_sta)
+{
+       int ac;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+               if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+                       continue;
+
+               ac = iwl_mvm_tid_to_ac_queue(i);
+               iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+                                   vif->hw_queue[ac], i, 0);
+               mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+       }
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                   struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta)
@@ -537,6 +783,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                        return ret;
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
+               /* If DQA is supported - the queues can be disabled now */
+               if (iwl_mvm_is_dqa_supported(mvm))
+                       iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
                /* if we are associated - we can't remove the AP STA now */
                if (vif->bss_conf.assoc)
                        return ret;
index 1a8f69a..e3efdcd 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 struct iwl_mvm;
 struct iwl_mvm_vif;
 
+/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * driver to allow dynamic allocation of queues on-demand, rather than allocate
+ * them statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *     TXQ #0 - command queue
+ *     TXQ #1 - aux frames
+ *     TXQ #2 - P2P device frames
+ *     TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *     TXQ #4 - BSS DATA frames queue
+ *     TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *     TXQ #9 - P2P GO/SoftAP probe responses
+ *     TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no such free queue exists for reserving, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue is required to be
+ * allocated, only mark the queue as aggregated via the ADD_STA command. Note,
+ * however, that a shared queue cannot be aggregated, and only after the other
+ * TIDs become inactive and are removed - only then can the queue be
+ * reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
+
 /**
  * DOC: station table - introduction
  *
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
 
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *     This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *     the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *     Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
  * @tx_time: medium time consumed by this A-MPDU
  */
 struct iwl_mvm_tid_data {
+       struct sk_buff_head deferred_tx_frames;
        u16 seq_number;
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
@@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data {
  *     we need to signal the EOSP
  * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
  * and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *     Every STA is given one reserved queue to allow it to operate. If no
+ *     such queue can be guaranteed, the STA addition will fail.
  * @tx_protection: reference counter for controlling the Tx protection.
  * @tt_tx_protection: is thermal throttling enable Tx protection?
  * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@ struct iwl_mvm_sta {
        bool bt_reduced_txpower;
        bool next_status_eosp;
        spinlock_t lock;
-       struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+       struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
        struct iwl_lq_sta lq_sta;
        struct ieee80211_vif *vif;
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
+       u16 deferred_traffic_tid_map;
+
+       u8 reserved_queue;
+
        /* Temporary, until the new TLC will control the Tx protection */
        s8 tx_protection;
        bool tt_tx_protection;
@@ -378,8 +442,18 @@ struct iwl_mvm_int_sta {
        u32 tfd_queue_msk;
 };
 
+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *     about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *     from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                          bool update);
+                          bool update, unsigned int flags);
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
                                       struct iwl_mvm_vif *mvmvif,
                                       bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
 #endif /* __sta_h__ */
index f1f2825..eb3f460 100644 (file)
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
        if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
                return;
 
-       /*
-        * We are now handling a temperature notification from the firmware
-        * in ASYNC and hold the mutex. thermal_notify_framework will call
-        * us back through get_temp() which ought to send a SYNC command to
-        * the firmware and hence to take the mutex.
-        * Avoid the deadlock by unlocking the mutex here.
-        */
        if (mvm->tz_device.tzone) {
                struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
 
-               mutex_unlock(&mvm->mutex);
                thermal_notify_framework(tz_dev->tzone,
                                         tz_dev->fw_trips_index[ths_crossed]);
-               mutex_lock(&mvm->mutex);
        }
 #endif /* CONFIG_THERMAL */
 }
@@ -796,9 +787,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
 {
        struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 
-       if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-               return -EBUSY;
-
        *state = mvm->cooling_dev.cur_state;
 
        return 0;
@@ -813,9 +801,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
                return -EIO;
 
-       if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-               return -EBUSY;
-
        mutex_lock(&mvm->mutex);
 
        if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
index 75870e6..efb9b98 100644 (file)
@@ -67,6 +67,7 @@
 #include <linux/etherdevice.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 
 #include "iwl-trans.h"
 #include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
                                    addr, tid, ssn);
 }
 
+#define OPT_HDR(type, skb, off) \
+       (type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+                           struct ieee80211_hdr *hdr,
+                           struct ieee80211_tx_info *info,
+                           struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+       u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+       u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+       u8 protocol = 0;
+
+       /*
+        * Do not compute checksum if already computed or if transport will
+        * compute it
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+               return;
+
+       /* We do not expect to be requested to csum stuff we do not support */
+       if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+                     (skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_IPV6)),
+                     "No support for requested checksum\n")) {
+               skb_checksum_help(skb);
+               return;
+       }
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               protocol = ip_hdr(skb)->protocol;
+       } else {
+#if IS_ENABLED(CONFIG_IPV6)
+               struct ipv6hdr *ipv6h =
+                       (struct ipv6hdr *)skb_network_header(skb);
+               unsigned int off = sizeof(*ipv6h);
+
+               protocol = ipv6h->nexthdr;
+               while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+                       /* only supported extension headers */
+                       if (protocol != NEXTHDR_ROUTING &&
+                           protocol != NEXTHDR_HOP &&
+                           protocol != NEXTHDR_DEST &&
+                           protocol != NEXTHDR_FRAGMENT) {
+                               skb_checksum_help(skb);
+                               return;
+                       }
+
+                       if (protocol == NEXTHDR_FRAGMENT) {
+                               struct frag_hdr *hp =
+                                       OPT_HDR(struct frag_hdr, skb, off);
+
+                               protocol = hp->nexthdr;
+                               off += sizeof(struct frag_hdr);
+                       } else {
+                               struct ipv6_opt_hdr *hp =
+                                       OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+                               protocol = hp->nexthdr;
+                               off += ipv6_optlen(hp);
+                       }
+               }
+               /* if we get here - protocol now should be TCP/UDP */
+#endif
+       }
+
+       if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+               WARN_ON_ONCE(1);
+               skb_checksum_help(skb);
+               return;
+       }
+
+       /* enable L4 csum */
+       offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+       /*
+        * Set offset to IP header (snap).
+        * We don't support tunneling so no need to take care of inner header.
+        * Size is in words.
+        */
+       offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+       /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+       if (skb->protocol == htons(ETH_P_IP) &&
+           (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+               ip_hdr(skb)->check = 0;
+               offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+       }
+
+       /* reset UDP/TCP header csum */
+       if (protocol == IPPROTO_TCP)
+               tcp_hdr(skb)->check = 0;
+       else
+               udp_hdr(skb)->check = 0;
+
+       /* mac header len should include IV, size is in words */
+       if (info->control.hw_key)
+               mh_len += info->control.hw_key->iv_len;
+       mh_len /= 2;
+       offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+       tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
+
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -126,6 +232,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+               if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+                       tx_cmd->offload_assist |=
+                               cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
        } else if (ieee80211_is_back_req(fc)) {
                struct ieee80211_bar *bar = (void *)skb->data;
                u16 control = le16_to_cpu(bar->control);
@@ -186,9 +295,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        /* Total # bytes to be transmitted */
        tx_cmd->len = cpu_to_le16((u16)skb->len +
                (uintptr_t)info->driver_data[0]);
-       tx_cmd->next_frame_len = 0;
        tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        tx_cmd->sta_id = sta_id;
+
+       /* padding is inserted later in transport */
+       if (ieee80211_hdrlen(fc) % 4 &&
+           !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+               tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+       iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
 }
 
 /*
@@ -459,6 +574,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
        u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
        u16 amsdu_add, snap_ip_tcp, pad, i = 0;
        unsigned int dbg_max_amsdu_len;
+       netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
        u8 *qc, tid, txf;
 
        snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -477,6 +593,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                goto segment;
        }
 
+       /*
+        * Do not build AMSDU for IPv6 with extension headers.
+        * Ask the stack to segment and checksum the generated MPDUs for us.
+        */
+       if (skb->protocol == htons(ETH_P_IPV6) &&
+           ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+           IPPROTO_TCP) {
+               num_subframes = 1;
+               pad = 0;
+               netdev_features &= ~NETIF_F_CSUM_MASK;
+               goto segment;
+       }
+
        /*
         * No need to lock amsdu_in_ampdu_allowed since it can't be modified
         * during an BA session.
@@ -570,7 +699,7 @@ segment:
        skb_shinfo(skb)->gso_size = num_subframes * mss;
        memcpy(cb, skb->cb, sizeof(cb));
 
-       next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+       next = skb_gso_segment(skb, netdev_features);
        skb_shinfo(skb)->gso_size = mss;
        if (WARN_ON_ONCE(IS_ERR(next)))
                return -EINVAL;
@@ -632,6 +761,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 #endif
 
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_sta *mvm_sta, u8 tid,
+                                 struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       u8 mac_queue = info->hw_queue;
+       struct sk_buff_head *deferred_tx_frames;
+
+       lockdep_assert_held(&mvm_sta->lock);
+
+       mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+       set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+       deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+       skb_queue_tail(deferred_tx_frames, skb);
+
+       /*
+        * The first deferred frame should've stopped the MAC queues, so we
+        * should never get a second deferred frame for the RA/TID.
+        */
+       if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+                 "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+                 skb_queue_len(deferred_tx_frames))) {
+               iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+               schedule_work(&mvm->add_stream_wk);
+       }
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -647,7 +805,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
-       bool is_data_qos = false, is_ampdu = false;
+       bool is_ampdu = false;
        int hdrlen;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -687,8 +845,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
-               is_data_qos = true;
                is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+       } else if (iwl_mvm_is_dqa_supported(mvm) &&
+                  (ieee80211_is_qos_nullfunc(fc) ||
+                   ieee80211_is_nullfunc(fc))) {
+               /*
+                * nullfunc frames should go to the MGMT queue regardless of QOS
+                */
+               tid = IWL_MAX_TID_COUNT;
+               txq_id = mvmsta->tid_data[tid].txq_id;
        }
 
        /* Copy MAC header from skb into command buffer */
@@ -709,13 +874,30 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                txq_id = mvmsta->tid_data[tid].txq_id;
        }
 
+       if (iwl_mvm_is_dqa_supported(mvm)) {
+               if (unlikely(mvmsta->tid_data[tid].txq_id ==
+                            IEEE80211_INVAL_HW_QUEUE)) {
+                       iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+                       /*
+                        * The frame is now deferred, and the worker scheduled
+                        * will re-allocate it, so we can free it for now.
+                        */
+                       iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+                       spin_unlock(&mvmsta->lock);
+                       return 0;
+               }
+
+               txq_id = mvmsta->tid_data[tid].txq_id;
+       }
+
        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;
 
-       if (is_data_qos && !ieee80211_has_morefrags(fc))
+       if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
                mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
 
        spin_unlock(&mvmsta->lock);
index 53cdc57..486c985 100644 (file)
@@ -491,98 +491,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table_v1 table;
-       u32 base;
-
-       base = mvm->error_event_table;
-       if (mvm->cur_ucode == IWL_UCODE_INIT) {
-               if (!base)
-                       base = mvm->fw->init_errlog_ptr;
-       } else {
-               if (!base)
-                       base = mvm->fw->inst_errlog_ptr;
-       }
-
-       if (base < 0x800000) {
-               IWL_ERR(mvm,
-                       "Not valid error log pointer 0x%08X for %s uCode\n",
-                       base,
-                       (mvm->cur_ucode == IWL_UCODE_INIT)
-                                       ? "Init" : "RT");
-               return;
-       }
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       /* Do not change this output - scripts rely on it */
-
-       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-       trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
-                                     table.data1, table.data2, table.data3,
-                                     table.blink2, table.ilink1, table.ilink2,
-                                     table.bcon_time, table.gp1, table.gp2,
-                                     table.gp3, table.ucode_ver, 0,
-                                     table.hw_ver, table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-               desc_lookup(table.error_id));
-       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
-       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
-       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
-       IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
-       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
-       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
-       if (mvm->support_umac_log)
-               iwl_mvm_dump_umac_error_log(mvm);
-}
-
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
        struct iwl_error_event_table table;
        u32 base;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
-               iwl_mvm_dump_nic_error_log_old(mvm);
-               return;
-       }
-
        base = mvm->error_event_table;
        if (mvm->cur_ucode == IWL_UCODE_INIT) {
                if (!base)
@@ -694,6 +608,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        mvm->queue_info[queue].hw_queue_refcount++;
        if (mvm->queue_info[queue].hw_queue_refcount > 1)
                enable_queue = false;
+       else
+               mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
        mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -779,6 +695,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                return;
        }
 
+       cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].hw_queue_refcount ||
             mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +997,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 out:
        ieee80211_connection_loss(vif);
 }
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+                        enum iwl_lqm_cmd_operatrions operation,
+                        u32 duration, u32 timeout)
+{
+       struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_link_qual_msrmnt_cmd cmd = {
+               .cmd_operation = cpu_to_le32(operation),
+               .mac_id = cpu_to_le32(mvm_vif->id),
+               .measurement_time = cpu_to_le32(duration),
+               .timeout = cpu_to_le32(timeout),
+       };
+       u32 cmdid =
+               iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+       int ret;
+
+       if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+               return -EOPNOTSUPP;
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return -EINVAL;
+
+       switch (operation) {
+       case LQM_CMD_OPERATION_START_MEASUREMENT:
+               if (iwl_mvm_lqm_active(mvm_vif->mvm))
+                       return -EBUSY;
+               if (!vif->bss_conf.assoc)
+                       return -EINVAL;
+               mvm_vif->lqm_active = true;
+               break;
+       case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+               if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+                                  &cmd);
+
+       /* command failed - roll back lqm_active state */
+       if (ret) {
+               mvm_vif->lqm_active =
+                       operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+       }
+
+       return ret;
+}
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+       bool *lqm_active = _data;
+
+       *lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+       bool ret = false;
+
+       lockdep_assert_held(&mvm->mutex);
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_lqm_active_iterator, &ret);
+
+       return ret;
+}
index 05b9685..41c6dd5 100644 (file)
@@ -483,17 +483,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
@@ -651,10 +653,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* The PCI device starts with a reference taken and we are
         * supposed to release it here.  But to simplify the
         * interaction with the opmode, we don't do it now, but let
-        * the opmode release it when it's ready.  To account for this
-        * reference, we start with ref_count set to 1.
+        * the opmode release it when it's ready.
         */
-       trans_pcie->ref_count = 1;
 
        return 0;
 
index dadafbd..9ce4ec6 100644 (file)
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
-       struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+       struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
        bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
-       /* protect ref counter */
-       spinlock_t ref_lock;
-       u32 ref_count;
-
        dma_addr_t fw_mon_phys;
        struct page *fw_mon_page;
        u32 fw_mon_size;
index 4be3c35..7f8a232 100644 (file)
@@ -210,8 +210,12 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
        if (trans->cfg->mq_rx_supported)
                iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
                               rxq->write_actual);
-       else
-               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       /*
+        * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
+        * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
+        * not wake the NIC.
+        */
+       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -908,6 +912,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
+       BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+                    ARRAY_SIZE(trans_pcie->rx_pool));
        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
@@ -1805,7 +1811,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        struct msix_entry *entry = dev_id;
        struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
        struct iwl_trans *trans = trans_pcie->trans;
-       struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+       struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta_fh, inta_hw;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
index eb39c7e..5e1a13e 100644 (file)
@@ -1321,6 +1321,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
         * after this call.
         */
        iwl_pcie_reset_ict(trans);
+       iwl_enable_interrupts(trans);
 
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1435,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
        int ret, i;
 
        if (trans->cfg->mq_rx_supported) {
-               max_vector = min_t(u32, (num_possible_cpus() + 1),
+               max_vector = min_t(u32, (num_possible_cpus() + 2),
                                   IWL_MAX_RX_HW_QUEUES);
                for (i = 0; i < max_vector; i++)
                        trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1466,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
 
        ret = pci_enable_msi(pdev);
        if (ret) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+               dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1500,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
                        IWL_ERR(trans_pcie->trans,
                                "Error allocating IRQ %d\n", i);
                        for (j = 0; j < i; j++)
-                               free_irq(trans_pcie->msix_entries[i].vector,
-                                        &trans_pcie->msix_entries[i]);
+                               free_irq(trans_pcie->msix_entries[j].vector,
+                                        &trans_pcie->msix_entries[j]);
                        pci_disable_msix(pdev);
                        return ret;
                }
@@ -1694,6 +1695,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        }
 
        free_percpu(trans_pcie->tso_hdr_page);
+       mutex_destroy(&trans_pcie->mutex);
        iwl_trans_free(trans);
 }
 
@@ -2014,38 +2016,32 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
 void iwl_trans_pcie_ref(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;
 
        if (iwlwifi_mod_params.d0i3_disable)
                return;
 
-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       trans_pcie->ref_count++;
        pm_runtime_get(&trans_pcie->pci_dev->dev);
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 void iwl_trans_pcie_unref(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;
 
        if (iwlwifi_mod_params.d0i3_disable)
                return;
 
-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
-               spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
-               return;
-       }
-       trans_pcie->ref_count--;
-
        pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
        pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
 
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 static const char *get_csr_string(int cmd)
@@ -2793,7 +2789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
-       spin_lock_init(&trans_pcie->ref_lock);
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
index 16ad820..e1f7a3f 100644 (file)
@@ -596,6 +596,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
        }
 }
 
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->reg_lock);
+
+       if (trans_pcie->ref_cmd_in_flight) {
+               trans_pcie->ref_cmd_in_flight = false;
+               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+               iwl_trans_pcie_unref(trans);
+       }
+
+       if (!trans->cfg->base_params->apmg_wake_up_wa)
+               return;
+       if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+               return;
+
+       trans_pcie->cmd_hold_nic_awake = false;
+       __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
 /*
  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
  */
@@ -620,6 +642,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                }
                iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+               if (q->read_ptr == q->write_ptr) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                       if (txq_id != trans_pcie->cmd_queue) {
+                               IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+                                             q->id);
+                               iwl_trans_pcie_unref(trans);
+                       } else {
+                               iwl_pcie_clear_cmd_in_flight(trans);
+                       }
+                       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+               }
        }
        txq->active = false;
 
@@ -1148,29 +1184,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       lockdep_assert_held(&trans_pcie->reg_lock);
-
-       if (trans_pcie->ref_cmd_in_flight) {
-               trans_pcie->ref_cmd_in_flight = false;
-               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
-               iwl_trans_pcie_unref(trans);
-       }
-
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
-               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
-                       return 0;
-
-               trans_pcie->cmd_hold_nic_awake = false;
-               __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-       }
-       return 0;
-}
-
 /*
  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -2197,6 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        __le16 fc;
        u8 hdr_len;
        u16 wifi_seq;
+       bool amsdu;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -2288,11 +2302,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         */
        len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
              hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-       tb1_len = ALIGN(len, 4);
-
-       /* Tell NIC about any 2-byte padding after MAC header */
-       if (tb1_len != len)
-               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       /* do not align A-MSDU to dword as the subframe header aligns it */
+       amsdu = ieee80211_is_data_qos(fc) &&
+               (*ieee80211_get_qos_ctl(hdr) &
+                IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+       if (trans_pcie->sw_csum_tx || !amsdu) {
+               tb1_len = ALIGN(len, 4);
+               /* Tell NIC about any 2-byte padding after MAC header */
+               if (tb1_len != len)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       } else {
+               tb1_len = len;
+       }
 
        /* The first TB points to the scratchbuf data - min_copy bytes */
        memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2331,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                goto out_err;
        iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
-       if (ieee80211_is_data_qos(fc) &&
-           (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+       if (amsdu) {
                if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
                                                     out_meta, dev_cmd,
                                                     tb1_len)))