Merge tag 'iwlwifi-next-for-kalle-2015-06-03' of https://git.kernel.org/pub/scm/linux...
authorKalle Valo <kvalo@codeaurora.org>
Wed, 3 Jun 2015 09:15:51 +0000 (12:15 +0300)
committerKalle Valo <kvalo@codeaurora.org>
Wed, 3 Jun 2015 09:15:51 +0000 (12:15 +0300)
* a few fixes (re-enablement of interrupts for certain new
  platforms that have special power states)
* Rework completely the RBD allocation model towards new
  multi RX hardware.
* cleanups
* scan reworks continuation (Luca)

41 files changed:
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-trans.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c

index 99f9760..efe3cf3 100644 (file)
@@ -21,6 +21,7 @@ config IWLWIFI
                Intel 7260 Wi-Fi Adapter
                Intel 3160 Wi-Fi Adapter
                Intel 7265 Wi-Fi Adapter
+               Intel 3165 Wi-Fi Adapter
                Intel 8260 Wi-Fi Adapter
 
 
index 3d32f41..dbfc5b1 100644 (file)
@@ -9,6 +9,7 @@ iwlwifi-objs            += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
+iwlwifi-objs           += iwl-trans.o
 
 iwlwifi-objs += $(iwlwifi-m)
 
index ba7fc42..852461f 100644 (file)
@@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_QUEUE_CONTROL |
                    IEEE80211_HW_SUPPORTS_PS |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+                   IEEE80211_HW_SUPPORT_FAST_XMIT |
                    IEEE80211_HW_WANT_MONITOR_VIF;
 
        hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
index 69b2c0b..cc35f79 100644 (file)
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  13
-#define IWL3160_UCODE_API_MAX  13
+#define IWL7260_UCODE_API_MAX  15
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
-#define IWL3160_UCODE_API_OK   12
+#define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  10
-#define IWL3160_UCODE_API_MIN  10
+#define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
-#define IWL3165_FW_PRE "iwlwifi-3165-"
-#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
-
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -271,8 +267,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3165",
-       .fw_name_pre = IWL3165_FW_PRE,
+       .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
+       /* sparse doesn't like the re-assignment but it is safe */
+#ifndef __CHECKER__
+       .ucode_api_ok = IWL3165_UCODE_API_OK,
+       .ucode_api_min = IWL3165_UCODE_API_MIN,
+#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -348,6 +349,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index ce6321b..72040cd 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  13
+#define IWL8000_UCODE_API_MAX  15
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   12
@@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
        .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_8000                                                \
-       .ucode_api_max = IWL8000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                   \
-       .ucode_api_min = IWL8000_UCODE_API_MIN,                 \
-       .device_family = IWL_DEVICE_FAMILY_8000,                \
-       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
-       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
-       .base_params = &iwl8000_base_params,                    \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,   \
-       .d0i3 = true,                                           \
-       .non_shared_ant = ANT_A,                                \
-       .dccm_offset = IWL8260_DCCM_OFFSET,                     \
-       .dccm_len = IWL8260_DCCM_LEN,                           \
-       .dccm2_offset = IWL8260_DCCM2_OFFSET,                   \
-       .dccm2_len = IWL8260_DCCM2_LEN,                         \
-       .smem_offset = IWL8260_SMEM_OFFSET,                     \
-       .smem_len = IWL8260_SMEM_LEN
+static const struct iwl_tt_params iwl8000_tt_params = {
+       .ct_kill_entry = 115,
+       .ct_kill_exit = 93,
+       .ct_kill_duration = 5,
+       .dynamic_smps_entry = 111,
+       .dynamic_smps_exit = 107,
+       .tx_protection_entry = 112,
+       .tx_protection_exit = 105,
+       .tx_backoff = {
+               {.temperature = 110, .backoff = 200},
+               {.temperature = 111, .backoff = 600},
+               {.temperature = 112, .backoff = 1200},
+               {.temperature = 113, .backoff = 2000},
+               {.temperature = 114, .backoff = 4000},
+       },
+       .support_ct_kill = true,
+       .support_dynamic_smps = true,
+       .support_tx_protection = true,
+       .support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000                                                        \
+       .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
+       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
+       .ucode_api_min = IWL8000_UCODE_API_MIN,                         \
+       .device_family = IWL_DEVICE_FAMILY_8000,                        \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                           \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                           \
+       .base_params = &iwl8000_base_params,                            \
+       .led_mode = IWL_LED_RF_STATE,                                   \
+       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,           \
+       .d0i3 = true,                                                   \
+       .non_shared_ant = ANT_A,                                        \
+       .dccm_offset = IWL8260_DCCM_OFFSET,                             \
+       .dccm_len = IWL8260_DCCM_LEN,                                   \
+       .dccm2_offset = IWL8260_DCCM2_OFFSET,                           \
+       .dccm2_len = IWL8260_DCCM2_LEN,                                 \
+       .smem_offset = IWL8260_SMEM_OFFSET,                             \
+       .smem_len = IWL8260_SMEM_LEN,                                   \
+       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,       \
+       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,       \
+       .thermal_params = &iwl8000_tt_params,                           \
+       .apmg_not_supported = true
 
 const struct iwl_cfg iwl8260_2n_cfg = {
        .name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
index 225b6d6..08c14af 100644 (file)
@@ -360,6 +360,7 @@ struct iwl_cfg {
        const u32 smem_offset;
        const u32 smem_len;
        const struct iwl_tt_params *thermal_params;
+       bool apmg_not_supported;
 };
 
 /*
index 223b875..948ce08 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
 
 TRACE_EVENT(iwlwifi_dev_rx,
        TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
-                void *rxbuf, size_t len),
-       TP_ARGS(dev, trans, rxbuf, len),
+                struct iwl_rx_packet *pkt, size_t len),
+       TP_ARGS(dev, trans, pkt, len),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+               __field(u8, cmd)
+               __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
        ),
        TP_fast_assign(
                DEV_ASSIGN;
-               memcpy(__get_dynamic_array(rxbuf), rxbuf,
-                      iwl_rx_trace_len(trans, rxbuf, len));
+               __entry->cmd = pkt->hdr.cmd;
+               memcpy(__get_dynamic_array(rxbuf), pkt,
+                      iwl_rx_trace_len(trans, pkt, len));
        ),
        TP_printk("[%s] RX cmd %#.2x",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+                 __get_str(dev), __entry->cmd)
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
index 7267152..6685259 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
 {
        const struct iwl_ucode_api *ucode_api = (void *)data;
        u32 api_index = le32_to_cpu(ucode_api->api_index);
+       u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+       int i;
 
-       if (api_index >= IWL_API_ARRAY_SIZE) {
+       if (api_index >= IWL_API_MAX_BITS / 32) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
-               return -EINVAL;
+               /* don't return an error so we can load FW that has more bits */
+               return 0;
        }
 
-       capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+       for (i = 0; i < 32; i++) {
+               if (api_flags & BIT(i))
+                       __set_bit(i + 32 * api_index, capa->_api);
+       }
 
        return 0;
 }
@@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
 {
        const struct iwl_ucode_capa *ucode_capa = (void *)data;
        u32 api_index = le32_to_cpu(ucode_capa->api_index);
+       u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+       int i;
 
-       if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+       if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
-               return -EINVAL;
+               /* don't return an error so we can load FW that has more bits */
+               return 0;
        }
 
-       capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+       for (i = 0; i < 32; i++) {
+               if (api_flags & BIT(i))
+                       __set_bit(i + 32 * api_index, capa->_capa);
+       }
 
        return 0;
 }
@@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        if (err)
                goto try_again;
 
-       if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+       if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
                api_ver = drv->fw.ucode_ver;
        else
                api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                sizeof(struct iwl_fw_dbg_trigger_txq_timer);
        trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
                sizeof(struct iwl_fw_dbg_trigger_time_event);
+       trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+               sizeof(struct iwl_fw_dbg_trigger_ba);
 
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
                if (pieces->dbg_trigger_tlv[i]) {
index 41ff85d..21302b6 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                return;
        }
 
+       if (data->sku_cap_mimo_disabled)
+               rx_chains = 1;
+
        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
 
index 5234a0b..750c8c9 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
        bool sku_cap_11ac_enable;
        bool sku_cap_amt_enable;
        bool sku_cap_ipan_enable;
+       bool sku_cap_mimo_disabled;
 
        u16 radio_cfg_type;
        u8 radio_cfg_step;
index d45dc02..d560648 100644 (file)
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
index 251bf8d..e57dbd0 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  *     detection.
  * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
  *     events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_RSSI,
        FW_DBG_TRIGGER_TXQ_TIMERS,
        FW_DBG_TRIGGER_TIME_EVENT,
+       FW_DBG_TRIGGER_BA,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
index c7cfc38..a9b5ae4 100644 (file)
@@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_GO_UAPSD            = BIT(30),
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
  * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ *     instead of 3.
  */
 enum iwl_ucode_tlv_api {
-       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
-       IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
-       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = BIT(9),
-       IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
-       IWL_UCODE_TLV_API_TX_POWER_DEV          = BIT(11),
-       IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
-       IWL_UCODE_TLV_API_SCD_CFG               = BIT(15),
-       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
-       IWL_UCODE_TLV_API_ASYNC_DTM             = BIT(17),
-       IWL_UCODE_TLV_API_LQ_SS_PARAMS          = BIT(18),
-       IWL_UCODE_TLV_API_STATS_V10             = BIT(19),
-       IWL_UCODE_TLV_API_NEW_VERSION           = BIT(20),
+       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
+       IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = (__force iwl_ucode_tlv_api_t)8,
+       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
+       IWL_UCODE_TLV_API_HDC_PHASE_0           = (__force iwl_ucode_tlv_api_t)10,
+       IWL_UCODE_TLV_API_TX_POWER_DEV          = (__force iwl_ucode_tlv_api_t)11,
+       IWL_UCODE_TLV_API_BASIC_DWELL           = (__force iwl_ucode_tlv_api_t)13,
+       IWL_UCODE_TLV_API_SCD_CFG               = (__force iwl_ucode_tlv_api_t)15,
+       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = (__force iwl_ucode_tlv_api_t)16,
+       IWL_UCODE_TLV_API_ASYNC_DTM             = (__force iwl_ucode_tlv_api_t)17,
+       IWL_UCODE_TLV_API_LQ_SS_PARAMS          = (__force iwl_ucode_tlv_api_t)18,
+       IWL_UCODE_TLV_API_STATS_V10             = (__force iwl_ucode_tlv_api_t)19,
+       IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
+       IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+
 /**
  * enum iwl_ucode_tlv_capa - ucode capabilities
  * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api {
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  */
 enum iwl_ucode_tlv_capa {
-       IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
-       IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = BIT(1),
-       IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = BIT(2),
-       IWL_UCODE_TLV_CAPA_BEAMFORMER                   = BIT(3),
-       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = BIT(6),
-       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = BIT(8),
-       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = BIT(9),
-       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = BIT(10),
-       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = BIT(11),
-       IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
-       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = BIT(13),
-       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
-       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = BIT(22),
-       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = BIT(28),
-       IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = BIT(29),
-       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = BIT(30),
+       IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
+       IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)1,
+       IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = (__force iwl_ucode_tlv_capa_t)2,
+       IWL_UCODE_TLV_CAPA_BEAMFORMER                   = (__force iwl_ucode_tlv_capa_t)3,
+       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)6,
+       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = (__force iwl_ucode_tlv_capa_t)8,
+       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = (__force iwl_ucode_tlv_capa_t)9,
+       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = (__force iwl_ucode_tlv_capa_t)10,
+       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = (__force iwl_ucode_tlv_capa_t)11,
+       IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)12,
+       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
+       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
+       IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
+       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
+       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
+       IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
+       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa {
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH   200
 
+#define IWL_API_MAX_BITS               64
+#define IWL_CAPABILITIES_MAX_BITS      64
+
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
 #define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE     1
-#define IWL_CAPABILITIES_ARRAY_SIZE    1
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
  * @SMEM_MODE: monitor stores the data in SMEM
  * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
  * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
  */
 enum iwl_fw_dbg_monitor_mode {
        SMEM_MODE = 0,
        EXTERNAL_MODE = 1,
        MARBH_MODE = 2,
+       MIPI_MODE = 3,
 };
 
 /**
@@ -660,6 +672,33 @@ struct iwl_fw_dbg_trigger_time_event {
        } __packed time_events[16];
 } __packed;
 
+/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *     when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *     when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *     when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *     when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ *     when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ *     when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ *     when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+       __le16 rx_ba_start;
+       __le16 rx_ba_stop;
+       __le16 tx_ba_start;
+       __le16 tx_ba_stop;
+       __le16 rx_bar;
+       __le16 tx_bar;
+       __le16 frame_timeout;
+} __packed;
+
 /**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
index cf75baf..3e3c9d8 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
        u32 n_scan_channels;
        u32 standard_phy_calibration_size;
        u32 flags;
-       u32 api[IWL_API_ARRAY_SIZE];
-       u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
+       unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
+       unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
 };
 
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+          iwl_ucode_tlv_api_t api)
+{
+       return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+           iwl_ucode_tlv_capa_t capa)
+{
+       return test_bit((__force long)capa, capabilities->_capa);
+}
+
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
        const void *data;       /* vmalloc'ed data */
@@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
                return "EXTERNAL_DRAM";
        case MARBH_MODE:
                return "MARBH";
+       case MIPI_MODE:
+               return "MIPI";
        default:
                return "UNKNOWN";
        }
index 0b5a81d..830dfec 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
 
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
-       NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
-       NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
-       NVM_SKU_CAP_11N_ENABLE  = BIT(2),
-       NVM_SKU_CAP_11AC_ENABLE = BIT(3),
+       NVM_SKU_CAP_BAND_24GHZ          = BIT(0),
+       NVM_SKU_CAP_BAND_52GHZ          = BIT(1),
+       NVM_SKU_CAP_11N_ENABLE          = BIT(2),
+       NVM_SKU_CAP_11AC_ENABLE         = BIT(3),
+       NVM_SKU_CAP_MIMO_DISABLE        = BIT(5),
 };
 
 /*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
        if (cfg->ht_params->ldpc)
                vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
 
+       if (data->sku_cap_mimo_disabled) {
+               num_rx_ants = 1;
+               num_tx_ants = 1;
+       }
+
        if (num_tx_ants > 1)
                vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
        else
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        const u8 *hw_addr;
 
        if (mac_override) {
+               static const u8 reserved_mac[] = {
+                       0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+               };
+
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
 
-               if (is_valid_ether_addr(data->hw_addr))
+               /*
+                * Force the use of the OTP MAC address in case of reserved MAC
+                * address in the NVM, or if address is given but invalid.
+                */
+               if (is_valid_ether_addr(data->hw_addr) &&
+                   memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
                        return;
 
                IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                data->sku_cap_11n_enable = false;
        data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
                                    (sku & NVM_SKU_CAP_11AC_ENABLE);
+       data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644 (file)
index 0000000..9f8bcef
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include "iwl-trans.h"
+
+/**
+ * iwl_trans_alloc - allocate and partially initialize a transport
+ * @priv_size: size of the transport-specific private area appended after
+ *	struct iwl_trans (reachable via the trans_specific[] flexible member)
+ * @dev: device this transport is bound to
+ * @cfg: device configuration
+ * @ops: transport operations vtable
+ * @dev_cmd_headroom: extra headroom reserved in front of each
+ *	struct iwl_device_cmd object allocated from the command cache
+ *
+ * Also creates the per-device "iwl_cmd_pool:<dev>" slab cache used for
+ * device commands.  Returns the new transport, or NULL on allocation
+ * failure.  Release with iwl_trans_free().
+ */
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+                                 struct device *dev,
+                                 const struct iwl_cfg *cfg,
+                                 const struct iwl_trans_ops *ops,
+                                 size_t dev_cmd_headroom)
+{
+       struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+       /* one lock class shared by the sync_cmd lockdep map of all transports */
+       static struct lock_class_key __key;
+#endif
+
+       /* private area is tacked onto the end of the shared struct */
+       trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+       if (!trans)
+               return NULL;
+
+#ifdef CONFIG_LOCKDEP
+       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+                        &__key, 0);
+#endif
+
+       trans->dev = dev;
+       trans->cfg = cfg;
+       trans->ops = ops;
+       trans->dev_cmd_headroom = dev_cmd_headroom;
+
+       /* cache name must be unique per device, hence the dev_name() suffix */
+       snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+                "iwl_cmd_pool:%s", dev_name(trans->dev));
+       trans->dev_cmd_pool =
+               kmem_cache_create(trans->dev_cmd_pool_name,
+                                 sizeof(struct iwl_device_cmd)
+                                 + trans->dev_cmd_headroom,
+                                 sizeof(void *),
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+       if (!trans->dev_cmd_pool)
+               goto free;
+
+       return trans;
+ free:
+       kfree(trans);
+       return NULL;
+}
+
+/**
+ * iwl_trans_free - free a transport allocated with iwl_trans_alloc()
+ * @trans: the transport to free
+ *
+ * Destroys the device-command slab cache before releasing the transport
+ * itself (including its trailing private area).
+ */
+void iwl_trans_free(struct iwl_trans *trans)
+{
+       kmem_cache_destroy(trans->dev_cmd_pool);
+       kfree(trans);
+}
index 56254a8..87a230a 100644 (file)
@@ -641,6 +641,8 @@ struct iwl_trans {
 
        enum iwl_d0i3_mode d0i3_mode;
 
+       bool wowlan_d0i3;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
@@ -1010,20 +1012,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
                iwl_op_mode_nic_error(trans->op_mode);
 }
 
+/*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+                                 struct device *dev,
+                                 const struct iwl_cfg *cfg,
+                                 const struct iwl_trans_ops *ops,
+                                 size_t dev_cmd_headroom);
+void iwl_trans_free(struct iwl_trans *trans);
+
 /*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
-static inline void trans_lockdep_init(struct iwl_trans *trans)
-{
-#ifdef CONFIG_LOCKDEP
-       static struct lock_class_key __key;
-
-       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
-                        &__key, 0);
-#endif
-}
-
 #endif /* __iwl_trans_h__ */
index 13a0a03..b4737e2 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
 
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
-       struct iwl_bt_coex_cmd *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret;
+       struct iwl_bt_coex_cmd bt_cmd = {};
        u32 mode;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_send_bt_init_conf_old(mvm);
 
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                        mode = 0;
                }
 
-               bt_cmd->mode = cpu_to_le32(mode);
+               bt_cmd.mode = cpu_to_le32(mode);
                goto send_cmd;
        }
 
        mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
-       bt_cmd->mode = cpu_to_le32(mode);
+       bt_cmd.mode = cpu_to_le32(mode);
 
        if (IWL_MVM_BT_COEX_SYNC2SCO)
-               bt_cmd->enabled_modules |=
+               bt_cmd.enabled_modules |=
                        cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
 
        if (iwl_mvm_bt_is_plcr_supported(mvm))
-               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+               bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
 
        if (IWL_MVM_BT_COEX_MPLUT) {
-               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
-               bt_cmd->enabled_modules |=
+               bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+               bt_cmd.enabled_modules |=
                        cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
        }
 
-       bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+       bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
 send_cmd:
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
+       return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-                                  struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       /* If channel context is invalid or not on 2.4GHz - don't count it */
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return;
-       }
-       rcu_read_unlock();
-
-       if (vif->type != NL80211_IFTYPE_STATION ||
-           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has been removed right now */
-       if (IS_ERR_OR_NULL(sta))
-               return;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event_data rssi_event)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-       };
        int ret;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
                return;
        }
@@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (ret)
                IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bt_rssi_iterator, &data);
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
@@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
 
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
 
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
        if (ant & mvm->cfg->non_shared_ant)
                return true;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
        if (mvm->cfg->bt_shared_single_ant)
                return true;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
-       return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
+       return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 {
        u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
 
        if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                iwl_mvm_bt_coex_vif_change_old(mvm);
                return;
        }
@@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
index d954591..6ac6de2 100644 (file)
@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        struct iwl_host_cmd cmd = {
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .dataflags = { IWL_HCMD_DFL_DUP, },
                .flags = CMD_ASYNC,
        };
        struct iwl_mvm_sta *mvmsta;
index 36bf6a8..4165d10 100644 (file)
@@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
 {
-       iwl_mvm_cancel_scan(mvm);
+       iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
        iwl_trans_stop_device(mvm->trans);
 
@@ -1170,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
        iwl_trans_suspend(mvm->trans);
-       if (wowlan->any) {
+       mvm->trans->wowlan_d0i3 = wowlan->any;
+       if (mvm->trans->wowlan_d0i3) {
                /* 'any' trigger means d0i3 usage */
                if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
                        int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1751,8 +1752,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        int i, j, n_matches, ret;
 
        fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
-       if (!IS_ERR_OR_NULL(fw_status))
+       if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
+               kfree(fw_status);
+       }
 
        if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
                wakeup.rfkill_release = true;
@@ -1783,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
                struct iwl_scan_offload_profile_match *fw_match;
                struct cfg80211_wowlan_nd_match *match;
-               int n_channels = 0;
+               int idx, n_channels = 0;
 
                fw_match = &query.matches[i];
 
@@ -1798,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
 
                net_detect->matches[net_detect->n_matches++] = match;
 
-               match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
-               memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+               /* We inverted the order of the SSIDs in the scan
+                * request, so invert the index here.
+                */
+               idx = mvm->n_nd_match_sets - i - 1;
+               match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+               memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
                       match->ssid.ssid_len);
 
                if (mvm->n_nd_channels < n_channels)
@@ -1869,15 +1876,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
-               goto out_unlock;
+               goto err;
 
        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
        if (ret)
-               goto out_unlock;
+               goto err;
 
        if (d3_status != IWL_D3_STATUS_ALIVE) {
                IWL_INFO(mvm, "Device was reset during suspend\n");
-               goto out_unlock;
+               goto err;
        }
 
        /* query SRAM first in case we want event logging */
@@ -1903,7 +1910,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto out_iterate;
        }
 
- out_unlock:
+err:
+       iwl_mvm_free_nd(mvm);
        mutex_unlock(&mvm->mutex);
 
 out_iterate:
@@ -1916,6 +1924,14 @@ out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+
+       /* We always return 1, which causes mac80211 to do a reconfig
+        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
+        * reconfig calls iwl_mvm_restart_complete(), where we unref
+        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
+        * reference here.
+        */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        return 1;
 }
 
@@ -2022,7 +2038,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
        iwl_abort_notification_waits(&mvm->notif_wait);
-       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        ieee80211_restart_hw(mvm->hw);
 
        /* wait for restart and disconnect all interfaces */
index 5f37eab..5c8a65d 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
        return ret ?: count;
 }
 
+/*
+ * debugfs read handler for the per-vif "tx_pwr_lmt" file: prints the
+ * TX power limit mac80211 stored in this vif's bss_conf.txpower.
+ */
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       char buf[64];
+       int bufsz = sizeof(buf);
+       int pos;
+
+       pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+                       vif->bss_conf.txpower);
+
+       /* copies at most 'pos' bytes out, honoring the caller's file offset */
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
        } while (0)
 
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
                                         S_IRUSR);
 
+       MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
index 9ac04c1..ffb4b5c 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                struct iwl_bt_coex_profile_notif_old *notif =
                        &mvm->last_bt_notif_old;
 
@@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
 
                pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
 
        if (mvm->scan_rx_ant != scan_rx_ant) {
                mvm->scan_rx_ant = scan_rx_ant;
-               if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                        iwl_mvm_config_scan(mvm);
        }
 
@@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
        PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
        PRINT_MVM_REF(IWL_MVM_REF_SCAN);
        PRINT_MVM_REF(IWL_MVM_REF_ROC);
+       PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
        PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
        PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
        PRINT_MVM_REF(IWL_MVM_REF_USER);
index be1a0a1..5e4cbdb 100644 (file)
@@ -294,6 +294,7 @@ enum iwl_scan_ebs_status {
        IWL_SCAN_EBS_SUCCESS,
        IWL_SCAN_EBS_FAILED,
        IWL_SCAN_EBS_CHAN_NOT_FOUND,
+       IWL_SCAN_EBS_INACTIVE,
 };
 
 /**
@@ -431,6 +432,17 @@ enum iwl_scan_priority {
        IWL_SCAN_PRIORITY_HIGH,
 };
 
+enum iwl_scan_priority_ext {
+       IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+       IWL_SCAN_PRIORITY_EXT_1,
+       IWL_SCAN_PRIORITY_EXT_2,
+       IWL_SCAN_PRIORITY_EXT_3,
+       IWL_SCAN_PRIORITY_EXT_4,
+       IWL_SCAN_PRIORITY_EXT_5,
+       IWL_SCAN_PRIORITY_EXT_6,
+       IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
 /**
  * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
  * @reserved1: for alignment and future use
@@ -837,4 +849,27 @@ struct iwl_scan_offload_profiles_query {
        struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
 } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
 
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ *     results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_umac_scan_iter_complete_notif {
+       __le32 uid;
+       u8 scanned_channels;
+       u8 status;
+       u8 bt_status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+       struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
 #endif
index 56db2ba..16e9ef4 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,7 @@ enum {
        ANTENNA_COUPLING_NOTIFICATION = 0xa,
 
        /* UMAC scan commands */
+       SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
        SCAN_CFG_CMD = 0xc,
        SCAN_REQ_UMAC = 0xd,
        SCAN_ABORT_UMAC = 0xe,
@@ -170,12 +171,8 @@ enum {
        /* Thermal Throttling*/
        REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
-       /* Scanning */
-       SCAN_REQUEST_CMD = 0x80,
-       SCAN_ABORT_CMD = 0x81,
-       SCAN_START_NOTIFICATION = 0x82,
-       SCAN_RESULTS_NOTIFICATION = 0x83,
-       SCAN_COMPLETE_NOTIFICATION = 0x84,
+       /* Set/Get DC2DC frequency tune */
+       DC2DC_CONFIG_CMD = 0x83,
 
        /* NVM */
        NVM_ACCESS_CMD = 0x88,
@@ -1395,6 +1392,49 @@ struct iwl_mvm_marker {
        __le32 metadata[0];
 } __packed; /* MARKER_API_S_VER_1 */
 
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+       DCDC_LOW_POWER_MODE_MSK_SET  = 0x1, /* not used */
+       DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+       __le32 flags;
+       __le32 enable_low_power_mode; /* not used */
+       __le32 dc2dc_freq_tune0;
+       __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+       __le32 dc2dc_freq_tune0;
+       __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
+
 /***********************************
  * Smart Fifo API
  ***********************************/
index 0601445..eb10c5e 100644 (file)
@@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
        if (!mvm->trans->ltr_enabled)
                return 0;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
                return iwl_mvm_config_ltr_v1(mvm);
 
        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                 * device that are triggered by the INIT firwmare (MFUART).
                 */
                _iwl_trans_stop_device(mvm->trans, false);
-               _iwl_trans_start_hw(mvm->trans, false);
+               ret = _iwl_trans_start_hw(mvm->trans, false);
                if (ret)
-                       return ret;
+                       goto error;
        }
 
        if (iwlmvm_mod_params.init_dbg)
@@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
                        goto error;
index b56a445..08367fb 100644 (file)
@@ -318,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
        if (IS_ERR_OR_NULL(resp)) {
                IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
-                             PTR_RET(resp));
+                             PTR_ERR_OR_ZERO(resp));
                goto out;
        }
 
@@ -334,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        kfree(resp);
        if (IS_ERR_OR_NULL(regd)) {
                IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
-                             PTR_RET(regd));
+                             PTR_ERR_OR_ZERO(regd));
                goto out;
        }
 
@@ -415,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
        int num_mac, ret, i;
+       static const u32 mvm_ciphers[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+       };
 
        /* Tell mac80211 our characteristics */
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -428,6 +434,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_TIMING_BEACON_ONLY |
                    IEEE80211_HW_CONNECTION_MONITOR |
                    IEEE80211_HW_CHANCTX_STA_CSA |
+                   IEEE80211_HW_SUPPORT_FAST_XMIT |
                    IEEE80211_HW_SUPPORTS_CLONED_SKBS;
 
        hw->queues = mvm->first_agg_queue;
@@ -440,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
+       BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+       memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+       hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+       hw->wiphy->cipher_suites = mvm->ciphers;
+
        /*
         * Enable 11w if advertised by firmware and software crypto
         * is not enabled (as the firmware will interpret some mgmt
         * packets, so enabling it with software crypto isn't safe)
         */
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
-           !iwlwifi_mod_params.sw_crypto)
+           !iwlwifi_mod_params.sw_crypto) {
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+               mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                       WLAN_CIPHER_SUITE_AES_CMAC;
+               hw->wiphy->n_cipher_suites++;
+       }
+
+       /* currently FW API supports only one optional cipher scheme */
+       if (mvm->fw->cs[0].cipher) {
+               mvm->hw->n_cipher_schemes = 1;
+               mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+               mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                       mvm->fw->cs[0].cipher;
+               hw->wiphy->n_cipher_suites++;
+       }
 
        hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
        hw->wiphy->features |=
                NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
-               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+               NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -509,10 +535,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
 
+       BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
        BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
                     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
        else
                mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
@@ -524,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-               if ((mvm->fw->ucode_capa.capa[0] &
-                    IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
-                   (mvm->fw->ucode_capa.api[0] &
-                    IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+                   fw_has_api(&mvm->fw->ucode_capa,
+                              IWL_UCODE_TLV_API_LQ_SS_PARAMS))
                        hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
                                IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
        }
@@ -553,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                               NL80211_FEATURE_STATIC_SMPS |
                               NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_QUIET;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
                hw->wiphy->features |=
                        NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
 
        mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
-       /* currently FW API supports only one optional cipher scheme */
-       if (mvm->fw->cs[0].cipher) {
-               mvm->hw->n_cipher_schemes = 1;
-               mvm->hw->cipher_schemes = &mvm->fw->cs[0];
-       }
-
 #ifdef CONFIG_PM_SLEEP
        if (iwl_mvm_is_d0i3_supported(mvm) &&
            device_can_wakeup(mvm->trans->dev)) {
@@ -616,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
                IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
        }
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
                IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
                hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        }
@@ -735,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
        return true;
 }
 
+/*
+ * Collect fw debug data for a BA trigger, but only if the bit for _tid
+ * is set in the trigger's per-TID bitmap _tid_bm (little-endian in fw).
+ */
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)  \
+       do {                                                    \
+               if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))        \
+                       break;                                  \
+               iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
+       } while (0)
+
+/*
+ * Fire the BA fw-debug trigger, if one is configured and not stopped for
+ * this vif, for aggregation start/stop events on the given TID.
+ * @rx_ba_ssn is only meaningful for IEEE80211_AMPDU_RX_START; other
+ * actions ignore it. Unhandled actions fall through to the default and
+ * collect nothing.
+ */
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+                           enum ieee80211_ampdu_mlme_action action)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       /* no BA trigger configured in the firmware debug TLVs */
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+
+       /* trigger may be limited to specific vifs / already exhausted */
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       switch (action) {
+       case IEEE80211_AMPDU_TX_OPERATIONAL: {
+               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+                                "TX AGG START: MAC %pM tid %d ssn %d\n",
+                                sta->addr, tid, tid_data->ssn);
+               break;
+               }
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+                                "TX AGG STOP: MAC %pM tid %d\n",
+                                sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_RX_START:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+                                "RX AGG START: MAC %pM tid %d ssn %d\n",
+                                sta->addr, tid, rx_ba_ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+                                "RX AGG STOP: MAC %pM tid %d\n",
+                                sta->addr, tid);
+               break;
+       default:
+               break;
+       }
+}
+
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
@@ -811,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                ret = -EINVAL;
                break;
        }
+
+       if (!ret) {
+               u16 rx_ba_ssn = 0;
+
+               if (action == IEEE80211_AMPDU_RX_START)
+                       rx_ba_ssn = *ssn;
+
+               iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+                                           rx_ba_ssn, action);
+       }
        mutex_unlock(&mvm->mutex);
 
        /*
@@ -1410,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
         * The work item could be running or queued if the
         * ROC time event stops just as we get here.
         */
-       cancel_work_sync(&mvm->roc_done_wk);
+       flush_work(&mvm->roc_done_wk);
 
        iwl_trans_stop_device(mvm->trans);
 
@@ -1423,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
+        * But make sure to cleanup interfaces that have gone down before/during
+        * HW restart was requested.
         */
-       clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               ieee80211_iterate_interfaces(mvm->hw, 0,
+                                            iwl_mvm_cleanup_iterator, mvm);
 
        /* We shouldn't have any UIDs still set.  Loop over all the UIDs to
         * make sure there's nothing left there and warn if any is found.
         */
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                int i;
 
                for (i = 0; i < mvm->max_scans; i++) {
-                       if (WARN_ONCE(mvm->scan_uid[i],
-                                     "UMAC scan UID %d was not cleaned\n",
-                                     mvm->scan_uid[i]))
-                               mvm->scan_uid[i] = 0;
+                       if (WARN_ONCE(mvm->scan_uid_status[i],
+                                     "UMAC scan UID %d status was not cleaned\n",
+                                     i))
+                               mvm->scan_uid_status[i] = 0;
                }
        }
 
@@ -1501,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                .pwr_restriction = cpu_to_le16(8 * tx_power),
        };
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
                return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -2360,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
        mutex_lock(&mvm->mutex);
 
        if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
-               iwl_mvm_scan_offload_stop(mvm, true);
+               iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
@@ -2411,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
         * cancel scan scan before ieee80211_scan_work() could run.
         * To handle that, simply return if the scan is not running.
        */
-       /* FIXME: for now, we ignore this race for UMAC scans, since
-        * they don't set the scan_status.
-        */
-       if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) ||
-           (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-               iwl_mvm_cancel_scan(mvm);
+       if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+               iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
        mutex_unlock(&mvm->mutex);
 }
@@ -2765,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
         * could run.  To handle this, simply return if the scan is
         * not running.
        */
-       /* FIXME: for now, we ignore this race for UMAC scans, since
-        * they don't set the scan_status.
-        */
-       if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
-           !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+       if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
                mutex_unlock(&mvm->mutex);
                return 0;
        }
 
-       ret = iwl_mvm_scan_offload_stop(mvm, false);
+       ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
        mutex_unlock(&mvm->mutex);
        iwl_mvm_wait_for_async_handlers(mvm);
 
@@ -3039,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               if (mvm->fw->ucode_capa.capa[0] &
-                   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
                        /* Use aux roc framework (HS20) */
                        ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
                                                       vif, duration);
@@ -3832,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        if (idx != 0)
                return -ENOENT;
 
-       if (!(mvm->fw->ucode_capa.capa[0] &
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;
 
@@ -3879,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-       if (!(mvm->fw->ucode_capa.capa[0] &
-                               IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;
 
        /* if beacon filtering isn't on mac80211 does it anyway */
@@ -3910,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
-static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
-                                      const struct ieee80211_event *event)
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       const struct ieee80211_event *event)
 {
 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)   \
        do {                                                    \
@@ -3921,16 +4003,12 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
                iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
        } while (0)
 
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
                return;
 
-       if (event->u.mlme.status == MLME_SUCCESS)
-               return;
-
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
@@ -3968,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
 #undef CHECK_MLME_TRIGGER
 }
 
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         const struct ieee80211_event *event)
+{
+       struct iwl_fw_dbg_trigger_tlv *tlv;
+       struct iwl_fw_dbg_trigger_ba *ba;
+
+       /* nothing to do unless a BA fw-debug trigger is configured */
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       tlv = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba = (void *)tlv->data;
+
+       /* honour the stop conditions first, then the per-TID rx_bar bitmap */
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, tlv) ||
+           !(le16_to_cpu(ba->rx_bar) & BIT(event->u.ba.tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, tlv,
+                                   "BAR received from %pM, tid %d, ssn %d",
+                                   event->u.ba.sta->addr, event->u.ba.tid,
+                                   event->u.ba.ssn);
+}
+
+/*
+ * Collect fw debug data when mac80211 reports a BA frame-timeout event,
+ * but only for TIDs enabled in the trigger's frame_timeout bitmap.
+ */
+static void
+iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif,
+                                    const struct ieee80211_event *event)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       /* no BA trigger configured in the firmware debug TLVs */
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       /* per-TID opt-in: bit must be set in the (little-endian) bitmap */
+       if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                   "Frame from %pM timed out, tid %d",
+                                   event->u.ba.sta->addr, event->u.ba.tid);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      const struct ieee80211_event *event)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       /* dispatch each mac80211 event type to its fw-debug trigger hook;
+        * event types without a handler are ignored.
+        */
+       if (event->type == MLME_EVENT)
+               iwl_mvm_event_mlme_callback(mvm, vif, event);
+       else if (event->type == BAR_RX_EVENT)
+               iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+       else if (event->type == BA_FRAME_TIMEOUT)
+               iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
index 6d33234..2d4bad5 100644 (file)
@@ -276,6 +276,7 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_UCODE_DOWN,
        IWL_MVM_REF_SCAN,
        IWL_MVM_REF_ROC,
+       IWL_MVM_REF_ROC_AUX,
        IWL_MVM_REF_P2P_CLIENT,
        IWL_MVM_REF_AP_IBSS,
        IWL_MVM_REF_USER,
@@ -446,6 +447,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 
 extern const u8 tid_to_mac80211_ac[];
 
+/* stopping-state bits occupy the upper byte of mvm->scan_status
+ * (see IWL_MVM_SCAN_STOPPING_MASK / IWL_MVM_SCAN_MASK below)
+ */
+#define IWL_MVM_SCAN_STOPPING_SHIFT    8
+
 enum iwl_scan_status {
        IWL_MVM_SCAN_REGULAR            = BIT(0),
        IWL_MVM_SCAN_SCHED              = BIT(1),
@@ -462,8 +465,8 @@ enum iwl_scan_status {
        IWL_MVM_SCAN_NETDETECT_MASK     = IWL_MVM_SCAN_NETDETECT |
                                          IWL_MVM_SCAN_STOPPING_NETDETECT,
 
-       IWL_MVM_SCAN_STOPPING_MASK      = 0xff00,
-       IWL_MVM_SCAN_MASK               = 0x00ff,
+       IWL_MVM_SCAN_STOPPING_MASK      = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+       IWL_MVM_SCAN_MASK               = 0xff,
 };
 
 /**
@@ -627,8 +630,7 @@ struct iwl_mvm {
        unsigned int max_scans;
 
        /* UMAC scan tracking */
-       u32 scan_uid[IWL_MVM_MAX_UMAC_SCANS];
-       u8 scan_seq_num, sched_scan_seq_num;
+       u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
 
        /* rx chain antennas set through debugfs for the scan command */
        u8 scan_rx_ant;
@@ -818,6 +820,8 @@ struct iwl_mvm {
        } tdls_cs;
 
        struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+
+       u32 ciphers[6];
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -887,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
        return mvm->trans->cfg->d0i3 &&
               mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
               !iwlwifi_mod_params.d0i3_disable &&
-              (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+              fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
        bool nvm_lar = mvm->nvm_data->lar_enabled;
-       bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
-               IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+       bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        if (iwlwifi_mod_params.lar_disable)
                return false;
@@ -911,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 
 static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
 {
-       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
-              mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+              fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
 }
 
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
-       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
+       return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
 }
 
 static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
 {
-       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
                IWL_MVM_BT_COEX_CORUNNING;
 }
 
 static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
 {
-       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
                IWL_MVM_BT_COEX_RRC;
 }
 
@@ -1121,34 +1130,34 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct cfg80211_scan_request *req,
                           struct ieee80211_scan_ies *ies);
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-                                          struct iwl_rx_cmd_buffer *rxb,
-                                          struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-                                               struct iwl_rx_cmd_buffer *rxb,
-                                               struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-                                      struct cfg80211_sched_scan_request *req);
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb,
+                                       struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             struct cfg80211_sched_scan_request *req,
                             struct ieee80211_scan_ies *ies,
                             int type);
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
 int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                                        struct iwl_rx_cmd_buffer *rxb,
                                        struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
index 87b2a30..2a6be35 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
        lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                     (mvm->fw->ucode_capa.capa[0] &
-                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+                     fw_has_capa(&mvm->fw->ucode_capa,
+                                 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
@@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
                kfree(nvm_buffer);
        }
 
-       /* load external NVM if configured */
+       /* Only if PNVM selected in the mod param - load external NVM  */
        if (mvm->nvm_file_name) {
-               /* read External NVM file - take the default */
+               /* read External NVM file from the mod param */
                ret = iwl_mvm_read_external_nvm(mvm);
                if (ret) {
                        /* choose the nvm_file name according to the
@@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        char mcc[3];
 
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               tlv_lar = mvm->fw->ucode_capa.capa[0] &
-                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+               tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+                                     IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
                nvm_lar = mvm->nvm_data->lar_enabled;
                if (tlv_lar != nvm_lar)
                        IWL_INFO(mvm,
index 02028bc..e4fa500 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
-       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+       if (!mvm->trans->cfg->apmg_not_supported)
                iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
                                       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -238,13 +238,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
-                  iwl_mvm_rx_scan_offload_iter_complete_notif, false),
+                  iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
-                  iwl_mvm_rx_scan_offload_complete_notif, true),
-       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
+                  iwl_mvm_rx_lmac_scan_complete_notif, true),
+       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
                   false),
        RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
                   true),
+       RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+                  iwl_mvm_rx_umac_scan_iter_complete_notif, false),
 
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
 
@@ -279,11 +281,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BINDING_CONTEXT_CMD),
        CMD(TIME_QUOTA_CMD),
        CMD(NON_QOS_TX_COUNTER_CMD),
-       CMD(SCAN_REQUEST_CMD),
-       CMD(SCAN_ABORT_CMD),
-       CMD(SCAN_START_NOTIFICATION),
-       CMD(SCAN_RESULTS_NOTIFICATION),
-       CMD(SCAN_COMPLETE_NOTIFICATION),
+       CMD(DC2DC_CONFIG_CMD),
        CMD(NVM_ACCESS_CMD),
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
@@ -356,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        CMD(TDLS_CONFIG_CMD),
        CMD(MCC_UPDATE_CMD),
+       CMD(SCAN_ITERATION_COMPLETE_UMAC),
 };
 #undef CMD
 
@@ -517,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        min_backoff = calc_min_backoff(trans, cfg);
        iwl_mvm_tt_initialize(mvm, min_backoff);
-       /* set the nvm_file_name according to priority */
-       if (iwlwifi_mod_params.nvm_file) {
+
+       if (iwlwifi_mod_params.nvm_file)
                mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
-       } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
-               else
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
-       }
+       else
+               IWL_DEBUG_EEPROM(mvm->trans->dev,
+                                "working without external nvm file\n");
 
        if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
                 "not allowing power-up and not having nvm_file\n"))
index 0440142..daff1d0 100644 (file)
@@ -138,7 +138,7 @@ struct rs_tx_column;
 
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta,
-                                    struct iwl_scale_tbl_info *tbl,
+                                    struct rs_rate *rate,
                                     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
@@ -150,14 +150,14 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl,
+                        struct rs_rate *rate,
                         const struct rs_tx_column *next_col)
 {
        return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl,
+                         struct rs_rate *rate,
                          const struct rs_tx_column *next_col)
 {
        struct iwl_mvm_sta *mvmsta;
@@ -180,11 +180,14 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
+       if (mvm->nvm_data->sku_cap_mimo_disabled)
+               return false;
+
        return true;
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl,
+                         struct rs_rate *rate,
                          const struct rs_tx_column *next_col)
 {
        if (!sta->ht_cap.ht_supported)
@@ -194,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl,
+                        struct rs_rate *rate,
                         const struct rs_tx_column *next_col)
 {
-       struct rs_rate *rate = &tbl->rate;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 
@@ -1125,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-       bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
-               IWL_UCODE_TLV_API_LQ_SS_PARAMS;
+       bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
+                                            IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1656,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
                for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
                        allow_func = next_col->checks[j];
-                       if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+                       if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+                                                     next_col))
                                break;
                }
 
@@ -2711,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
            (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
                lq_sta->stbc_capable = true;
 
-       if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
            (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
            (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
                lq_sta->bfer_capable = true;
@@ -2995,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
 
        /* TODO: remove old API when min FW API hits 14 */
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
            rs_stbc_allow(mvm, sta, lq_sta))
                rate.stbc = true;
 
@@ -3209,7 +3212,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
 
        rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
                rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
index d6314dd..8f1d93b 100644 (file)
@@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        };
        u32 temperature;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
                struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
 
                if (iwl_rx_packet_payload_len(pkt) != v10_len)
@@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        /* Only handle rx statistics temperature changes if async temp
         * notifications are not supported
         */
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
                iwl_mvm_tt_temp_changed(mvm, temperature);
 
        ieee80211_iterate_active_interfaces(mvm->hw,
index e50fd3f..5de1449 100644 (file)
@@ -101,14 +101,6 @@ struct iwl_mvm_scan_params {
        } schedule[2];
 };
 
-enum iwl_umac_scan_uid_type {
-       IWL_UMAC_SCAN_UID_REG_SCAN      = BIT(0),
-       IWL_UMAC_SCAN_UID_SCHED_SCAN    = BIT(1),
-};
-
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-                             enum iwl_umac_scan_uid_type type, bool notify);
-
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 {
        if (mvm->scan_rx_ant != ANT_NONE)
@@ -168,7 +160,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
 static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
                                    enum ieee80211_band band, int n_ssids)
 {
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
                return 10;
        if (band == IEEE80211_BAND_2GHZ)
                return 20  + 3 * (n_ssids + 1);
@@ -178,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
 static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
                                     enum ieee80211_band band)
 {
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
                        return 110;
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
@@ -213,8 +205,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
        params->max_out_time = 120;
 
        if (iwl_mvm_low_latency(mvm)) {
-               if (mvm->fw->ucode_capa.api[0] &
-                   IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
+               if (fw_has_api(&mvm->fw->ucode_capa,
+                              IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+
                        params->suspend_time = 105;
                        /*
                         * If there is more than one active interface make
@@ -228,8 +221,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
                }
        }
 
-       if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
-                                  IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+       if (frag_passive_dwell &&
+           fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
                /*
                 * P2P device scan should not be fragmented to avoid negative
                 * impact on P2P device discovery. Configure max_out_time to be
@@ -281,8 +275,8 @@ not_bound:
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 {
        /* require rrm scan whenever the fw supports it */
-       return mvm->fw->ucode_capa.capa[0] &
-              IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 }
 
 static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
@@ -318,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
        return max_ie_len;
 }
 
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-                                               struct iwl_rx_cmd_buffer *rxb,
-                                               struct iwl_device_cmd *cmd)
+static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
+                                    int num_res, u8 *buf, size_t buf_size)
+{
+       int i;
+       u8 *pos = buf, *end = buf + buf_size;
+
+       for (i = 0; pos < end && i < num_res; i++)
+               pos += snprintf(pos, end - pos, " %u", res[i].channel);
+
+       /* terminate the string in case the buffer was too short */
+       *(buf + buf_size - 1) = '\0';
+
+       return buf;
+}
+
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+       u8 buf[256];
 
        IWL_DEBUG_SCAN(mvm,
-                      "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
-                      notif->status, notif->scanned_channels);
+                      "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
+                      notif->status, notif->scanned_channels,
+                      iwl_mvm_dump_channel_list(notif->results,
+                                                notif->scanned_channels, buf,
+                                                sizeof(buf)));
        return 0;
 }
 
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd)
 {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
@@ -341,14 +354,27 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
        return 0;
 }
 
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-                                          struct iwl_rx_cmd_buffer *rxb,
-                                          struct iwl_device_cmd *cmd)
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+       switch (status) {
+       case IWL_SCAN_EBS_SUCCESS:
+               return "successful";
+       case IWL_SCAN_EBS_INACTIVE:
+               return "inactive";
+       case IWL_SCAN_EBS_FAILED:
+       case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+       default:
+               return "failed";
+       }
+}
+
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb,
+                                       struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
        bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
-       bool ebs_successful = (scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS);
 
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
@@ -368,13 +394,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
 
                IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
                               aborted ? "aborted" : "completed",
-                              ebs_successful ? "successful" : "failed");
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
        } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
                IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
                               aborted ? "aborted" : "completed",
-                              ebs_successful ? "successful" : "failed");
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
        } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
@@ -382,14 +408,14 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
 
                IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
                               aborted ? "aborted" : "completed",
-                              ebs_successful ? "successful" : "failed");
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
                ieee80211_sched_scan_stopped(mvm->hw);
        } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
                IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
                               aborted ? "aborted" : "completed",
-                              ebs_successful ? "successful" : "failed");
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
                ieee80211_scan_completed(mvm->hw,
@@ -397,7 +423,9 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
        }
 
-       mvm->last_ebs_successful = ebs_successful;
+       mvm->last_ebs_successful =
+                       scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+                       scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
 
        return 0;
 }
@@ -463,8 +491,9 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
        }
 }
 
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-                                      struct cfg80211_sched_scan_request *req)
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+                                  struct cfg80211_sched_scan_request *req)
 {
        struct iwl_scan_offload_profile *profile;
        struct iwl_scan_offload_profile_cfg *profile_cfg;
@@ -545,7 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
        return true;
 }
 
-static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 {
        int ret;
        struct iwl_host_cmd cmd = {
@@ -553,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        };
        u32 status;
 
-       /* Exit instantly with error when device is not ready
-        * to receive scan abort command or it does not perform
-        * scheduled scan currently */
-       if (!mvm->scan_status)
-               return -EIO;
-
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
@@ -578,73 +601,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
-{
-       int ret;
-       struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
-       bool sched = !!(mvm->scan_status & IWL_MVM_SCAN_SCHED);
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-               return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
-                                         notify);
-
-       /* FIXME: For now we only check if no scan is set here, since
-        * we only support LMAC in this flow and it doesn't support
-        * multiple scans.
-        */
-       if (!mvm->scan_status)
-               return 0;
-
-       if (iwl_mvm_is_radio_killed(mvm)) {
-               ret = 0;
-               goto out;
-       }
-
-       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
-                                  scan_done_notif,
-                                  ARRAY_SIZE(scan_done_notif),
-                                  NULL, NULL);
-
-       ret = iwl_mvm_send_scan_offload_abort(mvm);
-       if (ret) {
-               IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
-                              sched ? "offloaded " : "", ret);
-               iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-               goto out;
-       }
-
-       IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
-                      sched ? "scheduled " : "");
-
-       ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-out:
-       /* Clear the scan status so the next scan requests will
-        * succeed and mark the scan as stopping, so that the Rx
-        * handler doesn't do anything, as the scan was stopped from
-        * above. Since the rx handler won't do anything now, we have
-        * to release the scan reference here.
-        */
-       if (mvm->scan_status == IWL_MVM_SCAN_REGULAR)
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
-       if (sched) {
-               mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
-               mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED;
-               if (notify)
-                       ieee80211_sched_scan_stopped(mvm->hw);
-       } else {
-               mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
-               mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;
-               if (notify)
-                       ieee80211_scan_completed(mvm->hw, true);
-       }
-
-       return ret;
-}
-
 static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
                                     struct iwl_scan_req_tx_cmd *tx_cmd,
                                     bool no_cck)
@@ -775,6 +731,22 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
 }
 
+static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
+                                   enum iwl_scan_priority_ext prio)
+{
+       if (fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
+               return cpu_to_le32(prio);
+
+       if (prio <= IWL_SCAN_PRIORITY_EXT_2)
+               return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+       if (prio <= IWL_SCAN_PRIORITY_EXT_4)
+               return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
+
+       return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
+
 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
@@ -786,7 +758,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
-       cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+       cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
 static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
@@ -801,19 +773,23 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
                 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
 }
 
-static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, int n_iterations)
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       int n_iterations)
 {
        const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 
        /* We can only use EBS if:
         *      1. the feature is supported;
         *      2. the last EBS was successful;
-        *      3. if only single scan, the single scan EBS API is supported.
+        *      3. if only single scan, the single scan EBS API is supported;
+        *      4. it's not a p2p find operation.
         */
        return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
                mvm->last_ebs_successful &&
                (n_iterations > 1 ||
-                (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)));
+                fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
+               vif->type != NL80211_IFTYPE_P2P_DEVICE);
 }
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
@@ -891,7 +867,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->schedule[1].iterations = params->schedule[1].iterations;
        cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
 
-       if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) {
+       if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
                cmd->channel_opt[0].flags =
                        cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
                                    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -914,32 +890,6 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        return 0;
 }
 
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
-{
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-               return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
-                                         true);
-
-       if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR))
-               return 0;
-
-       if (iwl_mvm_is_radio_killed(mvm)) {
-               ieee80211_scan_completed(mvm->hw, true);
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-               mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
-               return 0;
-       }
-
-       return iwl_mvm_scan_offload_stop(mvm, true);
-}
-
-/* UMAC scan API */
-
-struct iwl_umac_scan_done {
-       struct iwl_mvm *mvm;
-       enum iwl_umac_scan_uid_type type;
-};
-
 static int rate_to_scan_rate_flag(unsigned int rate)
 {
        static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@@ -1048,68 +998,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        return ret;
 }
 
-static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
        int i;
 
        for (i = 0; i < mvm->max_scans; i++)
-               if (mvm->scan_uid[i] == uid)
+               if (mvm->scan_uid_status[i] == status)
                        return i;
 
-       return i;
-}
-
-static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
-{
-       return iwl_mvm_find_scan_uid(mvm, 0);
-}
-
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
-                                  enum iwl_umac_scan_uid_type type)
-{
-       int i;
-
-       for (i = 0; i < mvm->max_scans; i++)
-               if (mvm->scan_uid[i] & type)
-                       return true;
-
-       return false;
-}
-
-static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
-                                  enum iwl_umac_scan_uid_type type)
-{
-       int i;
-
-       for (i = 0; i < mvm->max_scans; i++)
-               if (mvm->scan_uid[i] & type)
-                       return i;
-
-       return i;
-}
-
-static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
-                                enum iwl_umac_scan_uid_type type)
-{
-       u32 uid;
-
-       /* make sure exactly one bit is on in scan type */
-       WARN_ON(hweight8(type) != 1);
-
-       /*
-        * Make sure scan uids are unique. If one scan lasts long time while
-        * others are completing frequently, the seq number will wrap up and
-        * we may have more than one scan with the same uid.
-        */
-       do {
-               uid = type | (mvm->scan_seq_num <<
-                             IWL_UMAC_SCAN_UID_SEQ_OFFSET);
-               mvm->scan_seq_num++;
-       } while (iwl_mvm_find_scan_uid(mvm, uid) < mvm->max_scans);
-
-       IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
-
-       return uid;
+       return -ENOENT;
 }
 
 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
@@ -1123,12 +1020,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
-       cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+       cmd->scan_priority =
+               iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
        if (iwl_mvm_scan_total_iterations(params) == 0)
-               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+               cmd->ooc_priority =
+                       iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
        else
-               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+               cmd->ooc_priority =
+                       iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
 }
 
 static void
@@ -1173,26 +1073,30 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        if (iwl_mvm_scan_total_iterations(params) > 1)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvm->scan_iter_notif_enabled)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
        return flags;
 }
 
 static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                            struct iwl_mvm_scan_params *params)
+                            struct iwl_mvm_scan_params *params,
+                            int type)
 {
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
        struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
                sizeof(struct iwl_scan_channel_cfg_umac) *
                        mvm->fw->ucode_capa.n_scan_channels;
-       u32 uid;
+       int uid;
        u32 ssid_bitmap = 0;
        int n_iterations = iwl_mvm_scan_total_iterations(params);
-       int uid_idx;
 
        lockdep_assert_held(&mvm->mutex);
 
-       uid_idx = iwl_mvm_find_free_scan_uid(mvm);
-       if (uid_idx >= mvm->max_scans)
-               return -EBUSY;
+       uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+       if (uid < 0)
+               return uid;
 
        memset(cmd, 0, ksize(cmd));
        cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
@@ -1200,17 +1104,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
-       if (n_iterations == 1)
-               uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-       else
-               uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
+       mvm->scan_uid_status[uid] = type;
 
-       mvm->scan_uid[uid_idx] = uid;
        cmd->uid = cpu_to_le32(uid);
-
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
-       if (iwl_mvm_scan_use_ebs(mvm, n_iterations))
+       if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
                                     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
@@ -1222,8 +1121,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
                                       params->n_channels, ssid_bitmap, cmd);
 
-       /* With UMAC we can have only one schedule, so use the sum of
-        * the iterations (with a a maximum of 255).
+       /* With UMAC we use only one schedule for now, so use the sum
+        * of the iterations (with a maximum of 255).
         */
        sec_part->schedule[0].iter_count =
                (n_iterations > 255) ? 255 : n_iterations;
@@ -1262,11 +1161,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
        case IWL_MVM_SCAN_REGULAR:
                if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
                        return -EBUSY;
-               return iwl_mvm_scan_offload_stop(mvm, true);
+               return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
        case IWL_MVM_SCAN_SCHED:
                if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
                        return -EBUSY;
-               return iwl_mvm_cancel_scan(mvm);
+               iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
        case IWL_MVM_SCAN_NETDETECT:
                /* No need to stop anything for net-detect since the
                 * firmware is restarted anyway.  This way, any sched
@@ -1337,9 +1236,10 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                hcmd.id = SCAN_REQ_UMAC;
-               ret = iwl_mvm_scan_umac(mvm, vif, &params);
+               ret = iwl_mvm_scan_umac(mvm, vif, &params,
+                                       IWL_MVM_SCAN_REGULAR);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
                ret = iwl_mvm_scan_lmac(mvm, vif, &params);
@@ -1444,9 +1344,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                hcmd.id = SCAN_REQ_UMAC;
-               ret = iwl_mvm_scan_umac(mvm, vif, &params);
+               ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
                ret = iwl_mvm_scan_lmac(mvm, vif, &params);
@@ -1478,144 +1378,118 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
        u32 uid = __le32_to_cpu(notif->uid);
-       bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
-       int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+       bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
-       /*
-        * Scan uid may be set to zero in case of scan abort request from above.
-        */
-       if (uid_idx >= mvm->max_scans)
+       if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
                return 0;
 
+       /* if the scan is already stopping, we don't need to notify mac80211 */
+       if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+               ieee80211_scan_completed(mvm->hw, aborted);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+       } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+               ieee80211_sched_scan_stopped(mvm->hw);
+       }
+
+       mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
        IWL_DEBUG_SCAN(mvm,
-                      "Scan completed, uid %u type %s, status %s, EBS status %s\n",
-                      uid, sched ? "sched" : "regular",
+                      "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+                      uid, mvm->scan_uid_status[uid],
                       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
-                               "success" : "failed");
+                      iwl_mvm_ebs_status_str(notif->ebs_status));
 
-       if (notif->ebs_status)
+       if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+           notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
                mvm->last_ebs_successful = false;
 
-       mvm->scan_uid[uid_idx] = 0;
-
-       if (!sched) {
-               ieee80211_scan_completed(mvm->hw,
-                                        notif->status ==
-                                               IWL_SCAN_OFFLOAD_ABORTED);
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-       } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
-               ieee80211_sched_scan_stopped(mvm->hw);
-       } else {
-               IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
-       }
+       mvm->scan_uid_status[uid] = 0;
 
        return 0;
 }
 
-static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
-                                    struct iwl_rx_packet *pkt, void *data)
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd)
 {
-       struct iwl_umac_scan_done *scan_done = data;
-       struct iwl_umac_scan_complete *notif = (void *)pkt->data;
-       u32 uid = __le32_to_cpu(notif->uid);
-       int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
-
-       if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
-               return false;
-
-       if (uid_idx >= scan_done->mvm->max_scans)
-               return false;
-
-       /*
-        * Clear scan uid of scans that was aborted from above and completed
-        * in FW so the RX handler does nothing. Set last_ebs_successful here if
-        * needed.
-        */
-       scan_done->mvm->scan_uid[uid_idx] = 0;
-
-       if (notif->ebs_status)
-               scan_done->mvm->last_ebs_successful = false;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+       u8 buf[256];
 
-       return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+       IWL_DEBUG_SCAN(mvm,
+                      "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
+                      notif->status, notif->scanned_channels,
+                      iwl_mvm_dump_channel_list(notif->results,
+                                                notif->scanned_channels, buf,
+                                                sizeof(buf)));
+       return 0;
 }
 
-static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
        struct iwl_umac_scan_abort cmd = {
                .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
                                        sizeof(struct iwl_mvm_umac_cmd_hdr)),
-               .uid = cpu_to_le32(uid),
        };
+       int uid, ret;
 
        lockdep_assert_held(&mvm->mutex);
 
+       /* We should always get a valid index here, because we already
+        * checked that this type of scan was running in the generic
+        * code.
+        */
+       uid = iwl_mvm_scan_uid_by_status(mvm, type);
+       if (WARN_ON_ONCE(uid < 0))
+               return uid;
+
+       cmd.uid = cpu_to_le32(uid);
+
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-       return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       if (!ret)
+               mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+       return ret;
 }
 
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-                             enum iwl_umac_scan_uid_type type, bool notify)
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
-       struct iwl_umac_scan_done scan_done = {
-               .mvm = mvm,
-               .type = type,
-       };
-       int i, ret = -EIO;
+       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+                                             SCAN_OFFLOAD_COMPLETE, };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
                                   scan_done_notif,
                                   ARRAY_SIZE(scan_done_notif),
-                                  iwl_scan_umac_done_check, &scan_done);
+                                  NULL, NULL);
 
        IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
 
-       for (i = 0; i < mvm->max_scans; i++) {
-               if (mvm->scan_uid[i] & type) {
-                       int err;
-
-                       if (iwl_mvm_is_radio_killed(mvm) &&
-                           (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
-                               ieee80211_scan_completed(mvm->hw, true);
-                               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-                               break;
-                       }
-
-                       err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
-                       if (!err)
-                               ret = 0;
-               }
-       }
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+               ret = iwl_mvm_umac_scan_abort(mvm, type);
+       else
+               ret = iwl_mvm_lmac_scan_abort(mvm);
 
        if (ret) {
-               IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+               IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
                iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
                return ret;
        }
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-       if (ret)
-               return ret;
-
-       if (notify) {
-               if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
-                       ieee80211_sched_scan_stopped(mvm->hw);
-               if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
-                       ieee80211_scan_completed(mvm->hw, true);
-                       iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-               }
-       }
 
        return ret;
 }
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                return sizeof(struct iwl_scan_req_umac) +
                        sizeof(struct iwl_scan_channel_cfg_umac) *
                                mvm->fw->ucode_capa.n_scan_channels +
@@ -1633,19 +1507,18 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
  */
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 {
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-               u32 uid, i;
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+               int uid, i;
 
-               uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-               if (uid < mvm->max_scans) {
+               uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+               if (uid >= 0) {
                        ieee80211_scan_completed(mvm->hw, true);
-                       mvm->scan_uid[uid] = 0;
+                       mvm->scan_uid_status[uid] = 0;
                }
-               uid = iwl_mvm_find_first_scan(mvm,
-                                             IWL_UMAC_SCAN_UID_SCHED_SCAN);
-               if (uid < mvm->max_scans && !mvm->restart_fw) {
+               uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+               if (uid >= 0 && !mvm->restart_fw) {
                        ieee80211_sched_scan_stopped(mvm->hw);
-                       mvm->scan_uid[uid] = 0;
+                       mvm->scan_uid_status[uid] = 0;
                }
 
                /* We shouldn't have any UIDs still set.  Loop over all the
@@ -1653,10 +1526,10 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
                 * any is found.
                 */
                for (i = 0; i < mvm->max_scans; i++) {
-                       if (WARN_ONCE(mvm->scan_uid[i],
-                                     "UMAC scan UID %d was not cleaned\n",
-                                     mvm->scan_uid[i]))
-                               mvm->scan_uid[i] = 0;
+                       if (WARN_ONCE(mvm->scan_uid_status[i],
+                                     "UMAC scan UID %d status was not cleaned\n",
+                                     i))
+                               mvm->scan_uid_status[i] = 0;
                }
        } else {
                if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
@@ -1670,3 +1543,40 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
                        ieee80211_sched_scan_stopped(mvm->hw);
        }
 }
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+       int ret;
+
+       if (!(mvm->scan_status & type))
+               return 0;
+
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               ret = 0;
+               goto out;
+       }
+
+       ret = iwl_mvm_scan_stop_wait(mvm, type);
+       if (!ret)
+               mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+       /* Clear the scan status so the next scan requests will
+        * succeed and mark the scan as stopping, so that the Rx
+        * handler doesn't do anything, as the scan was stopped from
+        * above.
+        */
+       mvm->scan_status &= ~type;
+
+       if (type == IWL_MVM_SCAN_REGULAR) {
+               /* Since the rx handler won't do anything now, we have
+                * to release the scan reference here.
+                */
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               if (notify)
+                       ieee80211_scan_completed(mvm->hw, true);
+       } else if (notify) {
+               ieee80211_sched_scan_stopped(mvm->hw);
+       }
+
+       return ret;
+}
index 1845b79..d68dc69 100644 (file)
@@ -5,8 +5,8 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+       iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+                              buf_size, ssn, wdg_timeout);
+
        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
        if (ret)
                return -EIO;
 
-       iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-                              buf_size, ssn, wdg_timeout);
-
        /*
         * Even though in theory the peer could have different
         * aggregation reorder buffer sizes for different sessions,
index fd7b0d3..d24b6a8 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
                queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+       }
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
                queues |= BIT(mvm->aux_queue);
-
-       iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+       }
 
        synchronize_net();
 
@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
+               iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        } else {
                IWL_DEBUG_TE(mvm,
@@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm_vif *mvmvif;
+       struct iwl_mvm_vif *mvmvif = NULL;
        struct iwl_mvm_time_event_data *te_data;
        bool is_p2p = false;
 
        lockdep_assert_held(&mvm->mutex);
 
-       mvmvif = NULL;
        spin_lock_bh(&mvm->time_event_lock);
 
        /*
@@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
                }
        }
 
-       /*
-        * Iterate over the list of aux roc time events and find the time
-        * event that is associated with a BSS interface.
-        * This assumes that a BSS interface can have only a single time
-        * event at any given time and this time event corresponds to a ROC
-        * request
+       /* There can only be at most one AUX ROC time event, we just use the
+        * list to simplify/unify code. Remove it if it exists.
         */
-       list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+       te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+                                          struct iwl_mvm_time_event_data,
+                                          list);
+       if (te_data)
                mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-               goto remove_te;
-       }
 
 remove_te:
        spin_unlock_bh(&mvm->time_event_lock);
index ef32e17..7ba7a11 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "mvm.h"
 #include "sta.h"
 
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+                         u16 tid, u16 ssn)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                   "BAR sent to %pM, tid %d, ssn %d",
+                                   addr, tid, ssn);
+}
+
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        } else if (ieee80211_is_back_req(fc)) {
                struct ieee80211_bar *bar = (void *)skb->data;
                u16 control = le16_to_cpu(bar->control);
+               u16 ssn = le16_to_cpu(bar->start_seq_num);
 
                tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
                tx_cmd->tid_tspec = (control &
                                     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
                        IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
                WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+               iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+                                         ssn);
        } else {
                tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
            !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 
-       if ((mvm->fw->ucode_capa.capa[0] &
-            IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
            ieee80211_action_contains_tpc(skb))
                tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
index bc55a8b..03f8e06 100644 (file)
@@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        struct iwl_error_event_table table;
        u32 base;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
                iwl_mvm_dump_nic_error_log_old(mvm);
                return;
        }
index b185697..2ed1e4d 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device)
        if (!trans->op_mode)
                return 0;
 
-       iwl_enable_rfkill_int(trans);
+       /*
+        * On suspend, ict is disabled, and the interrupt mask
+        * gets cleared. Reconfigure them both in case of d0i3
+        * image. Otherwise, only enable rfkill interrupt (in
+        * order to keep track of the rfkill status)
+        */
+       if (trans->wowlan_d0i3) {
+               iwl_pcie_reset_ict(trans);
+               iwl_enable_interrupts(trans);
+       } else {
+               iwl_enable_rfkill_int(trans);
+       }
 
        hw_rfkill = iwl_is_rfkill_set(trans);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
index 01996c9..9a3dae6 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -77,29 +86,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
+       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -107,6 +116,32 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handled to
+ *     the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *     of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+       atomic_t req_pending;
+       atomic_t req_ready;
+       struct list_head rbd_allocated;
+       struct list_head rbd_empty;
+       spinlock_t lock;
+       struct workqueue_struct *alloc_wq;
+       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct work_struct rx_replenish;
+       struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
index 7ff69c6..a3fbaa0 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When there are another 6 used RBDs - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
+ *   until ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has initial pool in the size of num_queues*(8-2) - the
+ *   maximum missing RBDs per allocation request (request posted with 2
+ *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
+ *   The queues supplies the recycle of the rest of the RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -254,6 +277,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
 
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct page *page;
+       gfp_t gfp_mask = GFP_KERNEL;
+
+       if (rxq->free_count > RX_LOW_WATERMARK)
+               gfp_mask |= __GFP_NOWARN;
+
+       if (trans_pcie->rx_page_order > 0)
+               gfp_mask |= __GFP_COMP;
+
+       /* Alloc a new receive buffer */
+       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+       if (!page) {
+               if (net_ratelimit())
+                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+                                      trans_pcie->rx_page_order);
+               /* Issue an error if the hardware has consumed more than half
+                * of its free buffer list and we don't have enough
+                * pre-allocated buffers.
+                */
+               if (rxq->free_count <= RX_LOW_WATERMARK &&
+                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+                   net_ratelimit())
+                       IWL_CRIT(trans,
+                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+                                rxq->free_count);
+               return NULL;
+       }
+       return page;
+}
+
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
-       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                }
                spin_unlock(&rxq->lock);
 
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (trans_pcie->rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
                /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-                                          "order: %d\n",
-                                          trans_pcie->rx_page_order);
-
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(trans, "Failed to alloc_pages with %s."
-                                        "Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?
-                                        "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
+               page = iwl_pcie_rx_alloc_page(trans);
+               if (!page)
                        return;
-               }
 
                spin_lock(&rxq->lock);
 
@@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, gfp);
+       iwl_pcie_rxq_alloc_rbs(trans);
 
        iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates for each received request 8 pages
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       while (atomic_read(&rba->req_pending)) {
+               int i;
+               struct list_head local_empty;
+               struct list_head local_allocated;
+
+               INIT_LIST_HEAD(&local_allocated);
+               spin_lock(&rba->lock);
+               /* swap out the entire rba->rbd_empty to a local list */
+               list_replace_init(&rba->rbd_empty, &local_empty);
+               spin_unlock(&rba->lock);
+
+               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+                       struct iwl_rx_mem_buffer *rxb;
+                       struct page *page;
+
+                       /* List should never be empty - each reused RBD is
+                        * returned to the list, and initial pool covers any
+                        * possible gap between the time the page is allocated
+                        * to the time the RBD is added.
+                        */
+                       BUG_ON(list_empty(&local_empty));
+                       /* Get the first rxb from the rbd list */
+                       rxb = list_first_entry(&local_empty,
+                                              struct iwl_rx_mem_buffer, list);
+                       BUG_ON(rxb->page);
+
+                       /* Alloc a new receive buffer */
+                       page = iwl_pcie_rx_alloc_page(trans);
+                       if (!page)
+                               continue;
+                       rxb->page = page;
+
+                       /* Get physical address of the RB */
+                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
+                                       PAGE_SIZE << trans_pcie->rx_page_order,
+                                       DMA_FROM_DEVICE);
+                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                               rxb->page = NULL;
+                               __free_pages(page, trans_pcie->rx_page_order);
+                               continue;
+                       }
+                       /* dma address must be no more than 36 bits */
+                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                       /* and also 256 byte aligned! */
+                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                       /* move the allocated entry to the out list */
+                       list_move(&rxb->list, &local_allocated);
+                       i++;
+               }
+
+               spin_lock(&rba->lock);
+               /* add the allocated rbds to the allocator allocated list */
+               list_splice_tail(&local_allocated, &rba->rbd_allocated);
+               /* add the unused rbds back to the allocator empty list */
+               list_splice_tail(&local_empty, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_dec(&rba->req_pending);
+               atomic_inc(&rba->req_ready);
+       }
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by queue when the queue posted allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+                                    struct iwl_rx_mem_buffer
+                                    *out[RX_CLAIM_REQ_ALLOC])
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       if (atomic_dec_return(&rba->req_ready) < 0) {
+               atomic_inc(&rba->req_ready);
+               IWL_DEBUG_RX(trans,
+                            "Allocation request not ready, pending requests = %d\n",
+                            atomic_read(&rba->req_pending));
+               return -ENOMEM;
+       }
+
+       spin_lock(&rba->lock);
+       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+               /* Get next free Rx buffer, remove it from free list */
+               out[i] = list_first_entry(&rba->rbd_allocated,
+                              struct iwl_rx_mem_buffer, list);
+               list_del(&out[i]->list);
+       }
+       spin_unlock(&rba->lock);
+
+       return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
+       struct iwl_rb_allocator *rba_p =
+               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-           container_of(data, struct iwl_trans_pcie, rx_replenish);
+               container_of(rba_p, struct iwl_trans_pcie, rba);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+       iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
+       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
+       rxq->used_count = 0;
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
+
+       for (i = 0; i < RX_POOL_SIZE; i++)
+               list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       for (i = 0; i < RX_POOL_SIZE; i++) {
+               if (!rba->pool[i].page)
+                       continue;
+               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+                              PAGE_SIZE << trans_pcie->rx_page_order,
+                              DMA_FROM_DEVICE);
+               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+               rba->pool[i].page = NULL;
+       }
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
+       if (!rba->alloc_wq)
+               rba->alloc_wq = alloc_workqueue("rb_allocator",
+                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
+       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+       spin_lock(&rba->lock);
+       atomic_set(&rba->req_pending, 0);
+       atomic_set(&rba->req_ready, 0);
+       /* free all first - we might be reconfigured for a different size */
+       iwl_pcie_rx_free_rba(trans);
+       iwl_pcie_rx_init_rba(rba);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
-       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+       iwl_pcie_rx_replenish(trans);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&trans_pcie->rx_replenish);
+       cancel_work_sync(&rba->rx_alloc);
+       if (rba->alloc_wq) {
+               destroy_workqueue(rba->alloc_wq);
+               rba->alloc_wq = NULL;
+       }
+
+       spin_lock(&rba->lock);
+       iwl_pcie_rx_free_rba(trans);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+                                 struct iwl_rx_mem_buffer *rxb,
+                                 struct iwl_rxq *rxq)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       /* Count the used RBDs */
+       rxq->used_count++;
+
+       /* Move the RBD to the used list, will be moved to allocator in batches
+        * before claiming or posting a request */
+       list_add_tail(&rxb->list, &rxq->rx_used);
+
+       /* If we have RX_POST_REQ_ALLOC new released rx buffers -
+        * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
+        * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
+        * after but we still need to post another request.
+        */
+       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+               /* Move the 2 RBDs to the allocator ownership.
+                * Allocator has another 6 from pool for the request completion */
+               spin_lock(&rba->lock);
+               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_pending);
+               queue_work(rba->alloc_wq, &rba->rx_alloc);
+       }
+}
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
 {
@@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       list_add_tail(&rxb->list, &rxq->rx_used);
+                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               list_add_tail(&rxb->list, &rxq->rx_used);
+               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 }
 
 /*
@@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty;
+       u32 r, i, j;
 
 restart:
        spin_lock(&rxq->lock);
@@ -720,14 +957,6 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-       /* calculate total frames need to be restock after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
@@ -739,29 +968,48 @@ restart:
                iwl_pcie_rx_handle_rb(trans, rxb);
 
                i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode wont assert. */
-               if (fill_rx) {
-                       count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               spin_unlock(&rxq->lock);
-                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-                               count = 0;
-                               goto restart;
+
+               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+                * try to claim the pre-allocated buffers from the allocator */
+               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+                       /* Add the remaining 6 empty RBDs for allocator use */
+                       spin_lock(&rba->lock);
+                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+                       spin_unlock(&rba->lock);
+
+                       /* If not ready - continue, will try to reclaim later.
+                        * No need to reschedule work - allocator exits only on
+                        * success */
+                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
+                               /* If success - then RX_CLAIM_REQ_ALLOC
+                                * buffers were retrieved and should be added
+                                * to free list */
+                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+                                       list_add_tail(&out[j]->list,
+                                                     &rxq->rx_free);
+                                       rxq->free_count++;
+                               }
                        }
                }
+               /* handle restock for two cases:
+                * - we just pulled buffers from the allocator
+                * - we have 8+ unstolen pages accumulated */
+               if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+                       rxq->read = i;
+                       spin_unlock(&rxq->lock);
+                       iwl_pcie_rxq_restock(trans);
+                       goto restart;
+               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
-       if (fill_rx)
-               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-       else
-               iwl_pcie_rxq_restock(trans);
-
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
 }
@@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
+           !trans->cfg->apmg_not_supported &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
                             APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
index 4526336..dd1b90b 100644 (file)
@@ -182,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
+       if (!trans->cfg->apmg_not_supported)
+               return;
+
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -315,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
-       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+       if (!trans->cfg->apmg_not_supported) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);
@@ -515,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 
        spin_unlock(&trans_pcie->irq_lock);
 
-       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
-               iwl_pcie_set_pwr(trans, false);
+       iwl_pcie_set_pwr(trans, false);
 
        iwl_op_mode_nic_config(trans->op_mode);
 
@@ -973,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                return ret;
 
        /* load to FW the binary sections of CPU2 */
-       ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
-                                             &first_ucode_section);
-       if (ret)
-               return ret;
-
-       return 0;
+       return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+                                              &first_ucode_section);
 }
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@@ -1067,9 +1065,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
                iwl_pcie_rx_stop(trans);
 
                /* Power-down device's busmaster DMA clocks */
-               iwl_write_prph(trans, APMG_CLK_DIS_REG,
-                              APMG_CLK_VAL_DMA_CLK_RQT);
-               udelay(5);
+               if (!trans->cfg->apmg_not_supported) {
+                       iwl_write_prph(trans, APMG_CLK_DIS_REG,
+                                      APMG_CLK_VAL_DMA_CLK_RQT);
+                       udelay(5);
+               }
        }
 
        /* Make sure (redundant) we've released our request to stay awake */
@@ -1362,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
-       kmem_cache_destroy(trans->dev_cmd_pool);
 
        if (trans_pcie->napi.poll)
                netif_napi_del(&trans_pcie->napi);
 
        iwl_pcie_free_fw_monitor(trans);
 
-       kfree(trans);
+       iwl_trans_free(trans);
 }
 
 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@@ -2462,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        u16 pci_cmd;
        int err;
 
-       trans = kzalloc(sizeof(struct iwl_trans) +
-                       sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-       if (!trans) {
-               err = -ENOMEM;
-               goto out;
-       }
+       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+                               &pdev->dev, cfg, &trans_ops_pcie, 0);
+       if (!trans)
+               return ERR_PTR(-ENOMEM);
 
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       trans->ops = &trans_ops_pcie;
-       trans->cfg = cfg;
-       trans_lockdep_init(trans);
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
@@ -2597,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-       snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
-                "iwl_cmd_pool:%s", dev_name(trans->dev));
-
-       trans->dev_cmd_headroom = 0;
-       trans->dev_cmd_pool =
-               kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd)
-                                 + trans->dev_cmd_headroom,
-                                 sizeof(void *),
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
-
-       if (!trans->dev_cmd_pool) {
-               err = -ENOMEM;
-               goto out_pci_disable_msi;
-       }
-
        if (iwl_pcie_alloc_ict(trans))
-               goto out_free_cmd_pool;
+               goto out_pci_disable_msi;
 
        err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
@@ -2632,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 out_free_ict:
        iwl_pcie_free_ict(trans);
-out_free_cmd_pool:
-       kmem_cache_destroy(trans->dev_cmd_pool);
 out_pci_disable_msi:
        pci_disable_msi(pdev);
 out_pci_release_regions:
@@ -2641,7 +2616,6 @@ out_pci_release_regions:
 out_pci_disable_device:
        pci_disable_device(pdev);
 out_no_pci:
-       kfree(trans);
-out:
+       iwl_trans_free(trans);
        return ERR_PTR(err);
 }
index 06952aa..2b06f99 100644 (file)
@@ -1053,8 +1053,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
        if (trans->cfg->base_params->apmg_wake_up_wa) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-                       udelay(2);
 
                ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,