Merge tag 'iwlwifi-next-for-kalle-2016-03-09_2' of https://git.kernel.org/pub/scm...
authorKalle Valo <kvalo@codeaurora.org>
Thu, 10 Mar 2016 12:53:35 +0000 (14:53 +0200)
committerKalle Valo <kvalo@codeaurora.org>
Thu, 10 Mar 2016 12:53:35 +0000 (14:53 +0200)
* update GSCAN capabilities (Ayala)
* fix AES-CMAC in AP mode (Johannes)
* adapt prints to new firmware API
* rx path improvements (Sara and Gregory)
* fixes for the thermal / cooling device code (Chaya Rachel)
* fixes for GO uAPSD handling
* more code for the 9000 device family (Sara)
* infrastructure work for firmware notification (Chaya Rachel)
* improve association reliability (Sara)
* runtime PM fixes
* fixes for ROC (HS2.0)

27 files changed:
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-fw.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c

index f62c2d7..8562812 100644 (file)
@@ -1652,10 +1652,10 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
 
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
                                      table.data1, table.data2, table.line,
-                                     table.blink1, table.blink2, table.ilink1,
-                                     table.ilink2, table.bcon_time, table.gp1,
-                                     table.gp2, table.gp3, table.ucode_ver,
-                                     table.hw_ver, 0, table.brd_ver);
+                                     table.blink2, table.ilink1, table.ilink2,
+                                     table.bcon_time, table.gp1, table.gp2,
+                                     table.gp3, table.ucode_ver, table.hw_ver,
+                                     0, table.brd_ver);
        IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
index 8e32a57..318b1dc 100644 (file)
@@ -140,7 +140,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .thermal_params = &iwl9000_tt_params,                           \
        .apmg_not_supported = true,                                     \
        .mq_rx_supported = true,                                        \
-       .vht_mu_mimo_supported = true
+       .vht_mu_mimo_supported = true,                                  \
+       .mac_addr_from_csr = true
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
                .name = "Intel(R) Dual Band Wireless AC 9260",
index 4f2b57e..3e4d346 100644 (file)
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
  * @host_interrupt_operation_mode: device needs host interrupt operation
  *     mode set
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @mac_addr_from_csr: read HW address from CSR registers
  * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
@@ -345,6 +346,7 @@ struct iwl_cfg {
        const bool host_interrupt_operation_mode;
        bool high_temp;
        u8   nvm_hw_section_num;
+       bool mac_addr_from_csr;
        bool lp_xtal_workaround;
        const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
        bool no_power_up_nic_in_init;
index a79c4f6..b978f6c 100644 (file)
@@ -598,4 +598,14 @@ enum msix_hw_int_causes {
 #define MSIX_AUTO_CLEAR_CAUSE                  0
 #define MSIX_NON_AUTO_CLEAR_CAUSE              BIT(7)
 
+/*****************************************************************************
+ *                     HW address related registers                          *
+ *****************************************************************************/
+
+#define CSR_ADDR_BASE                  (0x380)
+#define CSR_MAC_ADDR0_OTP              (CSR_ADDR_BASE)
+#define CSR_MAC_ADDR1_OTP              (CSR_ADDR_BASE + 4)
+#define CSR_MAC_ADDR0_STRAP            (CSR_ADDR_BASE + 8)
+#define CSR_MAC_ADDR1_STRAP            (CSR_ADDR_BASE + 0xC)
+
 #endif /* !__iwl_csr_h__ */
index 2a0703f..f02e2c8 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -121,13 +122,12 @@ TRACE_EVENT(iwlwifi_dev_tx,
 
 TRACE_EVENT(iwlwifi_dev_ucode_error,
        TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
-                u32 data1, u32 data2, u32 line, u32 blink1,
-                u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
-                u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
-                u32 brd_ver),
+                u32 data1, u32 data2, u32 line, u32 blink2, u32 ilink1,
+                u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 rev_type,
+                u32 major, u32 minor, u32 hw_ver, u32 brd_ver),
        TP_ARGS(dev, desc, tsf_low, data1, data2, line,
-               blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
-               gp3, major, minor, hw_ver, brd_ver),
+                blink2, ilink1, ilink2, bcon_time, gp1, gp2,
+                rev_type, major, minor, hw_ver, brd_ver),
        TP_STRUCT__entry(
                DEV_ENTRY
                __field(u32, desc)
@@ -135,14 +135,13 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __field(u32, data1)
                __field(u32, data2)
                __field(u32, line)
-               __field(u32, blink1)
                __field(u32, blink2)
                __field(u32, ilink1)
                __field(u32, ilink2)
                __field(u32, bcon_time)
                __field(u32, gp1)
                __field(u32, gp2)
-               __field(u32, gp3)
+               __field(u32, rev_type)
                __field(u32, major)
                __field(u32, minor)
                __field(u32, hw_ver)
@@ -155,29 +154,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
                __entry->data1 = data1;
                __entry->data2 = data2;
                __entry->line = line;
-               __entry->blink1 = blink1;
                __entry->blink2 = blink2;
                __entry->ilink1 = ilink1;
                __entry->ilink2 = ilink2;
                __entry->bcon_time = bcon_time;
                __entry->gp1 = gp1;
                __entry->gp2 = gp2;
-               __entry->gp3 = gp3;
+               __entry->rev_type = rev_type;
                __entry->major = major;
                __entry->minor = minor;
                __entry->hw_ver = hw_ver;
                __entry->brd_ver = brd_ver;
        ),
        TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
-                 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
-                 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
+                 "blink2 0x%05X ilink 0x%05X 0x%05X "
+                 "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
                  "minor 0x%08X hw 0x%08X brd 0x%08X",
                  __get_str(dev), __entry->desc, __entry->tsf_low,
-                 __entry->data1,
-                 __entry->data2, __entry->line, __entry->blink1,
+                 __entry->data1, __entry->data2, __entry->line,
                  __entry->blink2, __entry->ilink1, __entry->ilink2,
                  __entry->bcon_time, __entry->gp1, __entry->gp2,
-                 __entry->gp3, __entry->major, __entry->minor,
+                 __entry->rev_type, __entry->major, __entry->minor,
                  __entry->hw_ver, __entry->brd_ver)
 );
 
index 184c0fe..f899666 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -374,15 +376,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
        return 0;
 }
 
-static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
-                               const u32 len)
+static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+                                const u32 len)
 {
        struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
        struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
 
-       if (len < sizeof(*fw_capa))
-               return -EINVAL;
-
        capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
        capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
        capa->max_ap_cache_per_scan =
@@ -395,7 +394,15 @@ static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
                le32_to_cpu(fw_capa->max_significant_change_aps);
        capa->max_bssid_history_entries =
                le32_to_cpu(fw_capa->max_bssid_history_entries);
-       return 0;
+       capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
+       capa->max_number_epno_networks =
+               le32_to_cpu(fw_capa->max_number_epno_networks);
+       capa->max_number_epno_networks_by_ssid =
+               le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
+       capa->max_number_of_white_listed_ssid =
+               le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
+       capa->max_number_of_black_listed_ssid =
+               le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
 }
 
 /*
@@ -1023,8 +1030,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
                case IWL_UCODE_TLV_FW_GSCAN_CAPA:
-                       if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
-                               goto invalid_tlv_len;
+                       /*
+                        * Don't return an error in case of a shorter tlv_len
+                        * to enable loading of FW that has an old format
+                        * of GSCAN capabilities TLV.
+                        */
+                       if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
+                               break;
+
+                       iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
                        gscan_capa = true;
                        break;
                default:
@@ -1046,12 +1060,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
-       /*
-        * If ucode advertises that it supports GSCAN but GSCAN
-        * capabilities TLV is not present, warn and continue without GSCAN.
-        */
-       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-           WARN(!gscan_capa,
+       if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+                !gscan_capa,
                 "GSCAN is supported but capabilities TLV is unavailable\n"))
                __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
                            capa->_capa);
index 8af818b..582008a 100644 (file)
@@ -511,9 +511,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
  */
 #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
 
-#define MQ_RX_TABLE_SIZE               512
-#define MQ_RX_TABLE_MASK               (MQ_RX_TABLE_SIZE - 1)
-#define MQ_RX_POOL_SIZE                        MQ_RX_TABLE_MASK
+#define MQ_RX_TABLE_SIZE       512
+#define MQ_RX_TABLE_MASK       (MQ_RX_TABLE_SIZE - 1)
+#define MQ_RX_NUM_RBDS         (MQ_RX_TABLE_SIZE - 1)
+#define RX_POOL_SIZE           (MQ_RX_NUM_RBDS +       \
+                                IWL_MAX_RX_HW_QUEUES * \
+                                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
 
 #define RX_QUEUE_SIZE                         256
 #define RX_QUEUE_MASK                         255
index 5f69bf5..15ec4e2 100644 (file)
@@ -809,6 +809,12 @@ struct iwl_fw_dbg_conf_tlv {
  *     change APs.
  * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
  *     hold.
+ * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
+ * @max_number_epno_networks: max number of epno entries.
+ * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
+ *     specified.
+ * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
+ * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
  */
 struct iwl_fw_gscan_capabilities {
        __le32 max_scan_cache_size;
@@ -819,6 +825,11 @@ struct iwl_fw_gscan_capabilities {
        __le32 max_hotlist_aps;
        __le32 max_significant_change_aps;
        __le32 max_bssid_history_entries;
+       __le32 max_hotlist_ssids;
+       __le32 max_number_epno_networks;
+       __le32 max_number_epno_networks_by_ssid;
+       __le32 max_number_of_white_listed_ssid;
+       __le32 max_number_of_black_listed_ssid;
 } __packed;
 
 #endif  /* __iwl_fw_file_h__ */
index 85d6d6d..2942571 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -205,6 +207,12 @@ struct iwl_fw_cscheme_list {
  *     change APs.
  * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
  *     hold.
+ * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
+ * @max_number_epno_networks: max number of epno entries.
+ * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
+ *     specified.
+ * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
+ * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
  */
 struct iwl_gscan_capabilities {
        u32 max_scan_cache_size;
@@ -215,6 +223,11 @@ struct iwl_gscan_capabilities {
        u32 max_hotlist_aps;
        u32 max_significant_change_aps;
        u32 max_bssid_history_entries;
+       u32 max_hotlist_ssids;
+       u32 max_number_epno_networks;
+       u32 max_number_epno_networks_by_ssid;
+       u32 max_number_of_white_listed_ssid;
+       u32 max_number_of_black_listed_ssid;
 };
 
 /**
index 3481357..93a6895 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -69,6 +70,9 @@
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
 
 /* NVM offsets (in words) definitions */
 enum wkp_nvm_offsets {
@@ -522,27 +526,41 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
        data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
 }
 
-static void iwl_set_hw_address(const struct iwl_cfg *cfg,
-                              struct iwl_nvm_data *data,
-                              const __le16 *nvm_sec)
+static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
 {
-       const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
-
-       /* The byte order is little endian 16 bit, meaning 214365 */
-       data->hw_addr[0] = hw_addr[1];
-       data->hw_addr[1] = hw_addr[0];
-       data->hw_addr[2] = hw_addr[3];
-       data->hw_addr[3] = hw_addr[2];
-       data->hw_addr[4] = hw_addr[5];
-       data->hw_addr[5] = hw_addr[4];
+       const u8 *hw_addr;
+
+       hw_addr = (const u8 *)&mac_addr0;
+       dest[0] = hw_addr[3];
+       dest[1] = hw_addr[2];
+       dest[2] = hw_addr[1];
+       dest[3] = hw_addr[0];
+
+       hw_addr = (const u8 *)&mac_addr1;
+       dest[4] = hw_addr[1];
+       dest[5] = hw_addr[0];
 }
 
-static void iwl_set_hw_address_family_8000(struct device *dev,
+static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+                                       struct iwl_nvm_data *data)
+{
+       __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
+       __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
+
+       /* If OEM did not fuse address - get it from OTP */
+       if (!mac_addr0 && !mac_addr1) {
+               mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+               mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
+       }
+
+       iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+}
+
+static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
                                           const struct iwl_cfg *cfg,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
-                                          const __le16 *nvm_hw,
-                                          __le32 mac_addr0, __le32 mac_addr1)
+                                          const __le16 *nvm_hw)
 {
        const u8 *hw_addr;
 
@@ -568,45 +586,68 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
                        return;
 
-               IWL_ERR_DEV(dev,
-                           "mac address from nvm override section is not valid\n");
+               IWL_ERR(trans,
+                       "mac address from nvm override section is not valid\n");
        }
 
        if (nvm_hw) {
-               /* read the MAC address from HW resisters */
-               hw_addr = (const u8 *)&mac_addr0;
-               data->hw_addr[0] = hw_addr[3];
-               data->hw_addr[1] = hw_addr[2];
-               data->hw_addr[2] = hw_addr[1];
-               data->hw_addr[3] = hw_addr[0];
-
-               hw_addr = (const u8 *)&mac_addr1;
-               data->hw_addr[4] = hw_addr[1];
-               data->hw_addr[5] = hw_addr[0];
-
-               if (!is_valid_ether_addr(data->hw_addr))
-                       IWL_ERR_DEV(dev,
-                                   "mac address (%pM) from hw section is not valid\n",
-                                   data->hw_addr);
+               /* read the mac address from WFMP registers */
+               __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
+                                               WFMP_MAC_ADDR_0));
+               __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
+                                               WFMP_MAC_ADDR_1));
+
+               iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 
                return;
        }
 
-       IWL_ERR_DEV(dev, "mac address is not found\n");
+       IWL_ERR(trans, "mac address is not found\n");
+}
+
+static int iwl_set_hw_address(struct iwl_trans *trans,
+                             const struct iwl_cfg *cfg,
+                             struct iwl_nvm_data *data, const __le16 *nvm_hw,
+                             const __le16 *mac_override)
+{
+       if (cfg->mac_addr_from_csr) {
+               iwl_set_hw_address_from_csr(trans, data);
+       } else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
+
+               /* The byte order is little endian 16 bit, meaning 214365 */
+               data->hw_addr[0] = hw_addr[1];
+               data->hw_addr[1] = hw_addr[0];
+               data->hw_addr[2] = hw_addr[3];
+               data->hw_addr[3] = hw_addr[2];
+               data->hw_addr[4] = hw_addr[5];
+               data->hw_addr[5] = hw_addr[4];
+       } else {
+               iwl_set_hw_address_family_8000(trans, cfg, data,
+                                              mac_override, nvm_hw);
+       }
+
+       if (!is_valid_ether_addr(data->hw_addr)) {
+               IWL_ERR(trans, "no valid mac address was found\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 struct iwl_nvm_data *
-iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  __le32 mac_addr0, __le32 mac_addr1)
+                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
 {
+       struct device *dev = trans->dev;
        struct iwl_nvm_data *data;
-       u32 sku;
-       u32 radio_cfg;
+       bool lar_enabled;
+       u32 sku, radio_cfg;
        u16 lar_config;
+       const __le16 *ch_section;
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                data = kzalloc(sizeof(*data) +
@@ -645,21 +686,16 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                /* Checking for required sections */
                if (!nvm_calib) {
-                       IWL_ERR_DEV(dev,
-                                   "Can't parse empty Calib NVM sections\n");
+                       IWL_ERR(trans,
+                               "Can't parse empty Calib NVM sections\n");
                        kfree(data);
                        return NULL;
                }
                /* in family 8000 Xtal calibration values moved to OTP */
                data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
                data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
-       }
-
-       if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
-               iwl_set_hw_address(cfg, data, nvm_hw);
-
-               iwl_init_sbands(dev, cfg, data, nvm_sw,
-                               tx_chains, rx_chains, lar_fw_supported);
+               lar_enabled = true;
+               ch_section = nvm_sw;
        } else {
                u16 lar_offset = data->nvm_version < 0xE39 ?
                                 NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -668,16 +704,18 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config &
                                       NVM_LAR_ENABLED_FAMILY_8000);
+               lar_enabled = data->lar_enabled;
+               ch_section = regulatory;
+       }
 
-               /* MAC address in family 8000 */
-               iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
-                                              nvm_hw, mac_addr0, mac_addr1);
-
-               iwl_init_sbands(dev, cfg, data, regulatory,
-                               tx_chains, rx_chains,
-                               lar_fw_supported && data->lar_enabled);
+       /* If no valid mac address was found - bail out */
+       if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
+               kfree(data);
+               return NULL;
        }
 
+       iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+                       lar_fw_supported && lar_enabled);
        data->calib_version = 255;
 
        return data;
index 4e8e0dc..d704d52 100644 (file)
  * later with iwl_free_nvm_data().
  */
 struct iwl_nvm_data *
-iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  __le32 mac_addr0, __le32 mac_addr1);
+                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
index 56e6b0b..a43b392 100644 (file)
 #include "debugfs.h"
 #include "iwl-fw-error-dump.h"
 
+static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[16];
+       int pos, budget;
+
+       if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+               return -EIO;
+
+       mutex_lock(&mvm->mutex);
+       budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+       mutex_unlock(&mvm->mutex);
+
+       if (budget < 0)
+               return budget;
+
+       pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
+                                        size_t count, loff_t *ppos)
+{
+       int ret;
+
+       if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+               return -EIO;
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
 static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                                        size_t count, loff_t *ppos)
 {
@@ -1493,6 +1531,8 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
 
 /* Device wide debugfs entries */
+MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
+MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
 MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
@@ -1542,6 +1582,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir,
                             S_IWUSR | S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
index eb9b870..7a16e55 100644 (file)
@@ -264,9 +264,8 @@ enum iwl_rx_mpdu_mac_flags2 {
 };
 
 enum iwl_rx_mpdu_amsdu_info {
-       IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK     = 0x3f,
-       IWL_RX_MPDU_AMSDU_LAST_SUBFRAME         = 0x40,
-       /* 0x80 bit reserved for now */
+       IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK     = 0x7f,
+       IWL_RX_MPDU_AMSDU_LAST_SUBFRAME         = 0x80,
 };
 
 enum iwl_rx_l3l4_flags {
index ec6b072..76e649c 100644 (file)
@@ -610,8 +610,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
 
-       wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM);
-
        mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
 #ifdef CONFIG_PM_SLEEP
@@ -2556,10 +2554,8 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
-                          200 + vif->bss_conf.beacon_int);
-       u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
-                              100 + vif->bss_conf.beacon_int);
+       u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+       u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
 
        if (WARN_ON_ONCE(vif->bss_conf.assoc))
                return;
@@ -2690,8 +2686,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                         * GTK on AP interface is a TX-only key, return 0;
                         * on IBSS they're per-station and because we're lazy
                         * we don't support them for RX, so do the same.
+                        * CMAC in AP/IBSS modes must be done in software.
                         */
-                       ret = 0;
+                       if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+                               ret = -EOPNOTSUPP;
+                       else
+                               ret = 0;
                        key->hw_key_idx = STA_KEY_IDX_INVALID;
                        break;
                }
index ab410b4..9abbc93 100644 (file)
@@ -543,8 +543,8 @@ struct iwl_mvm_thermal_device {
 };
 
 /*
- * iwl_mvm_cooling_device
- * @cur_state: current state in milliwatts
+ * struct iwl_mvm_cooling_device
+ * @cur_state: current state
  * @cdev: struct thermal cooling device
  */
 struct iwl_mvm_cooling_device {
@@ -1575,7 +1575,6 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
 void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
-int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm);
 int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
 
 /* Location Aware Regulatory */
index c446e0d..25a9840 100644 (file)
@@ -300,7 +300,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
        bool lar_enabled;
-       __le32 mac_addr0, mac_addr1;
 
        /* Checking for required sections */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -336,12 +335,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        if (WARN_ON(!mvm->cfg))
                return NULL;
 
-       /* read the mac address from WFMP registers */
-       mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                   WFMP_MAC_ADDR_0));
-       mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                   WFMP_MAC_ADDR_1));
-
        hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
@@ -354,10 +347,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                      fw_has_capa(&mvm->fw->ucode_capa,
                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
-       return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
+       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
                                  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-                                 lar_enabled, mac_addr0, mac_addr1);
+                                 lar_enabled);
 }
 
 #define MAX_NVM_FILE_LEN       16384
index 699a808..5e8ab79 100644 (file)
@@ -205,79 +205,107 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
                                       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
 }
 
+/**
+ * enum iwl_rx_handler_context - context for Rx handler
+ * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
+ *     which can't acquire mvm->mutex.
+ * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
+ *     (and only in this case!), it should be set as ASYNC. In that case,
+ *     it will be called from a worker with mvm->mutex held.
+ * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
+ *     mutex itself, it will be called from a worker without mvm->mutex held.
+ */
+enum iwl_rx_handler_context {
+       RX_HANDLER_SYNC,
+       RX_HANDLER_ASYNC_LOCKED,
+       RX_HANDLER_ASYNC_UNLOCKED,
+};
+
+/**
+ * struct iwl_rx_handlers - handler for FW notification
+ * @cmd_id: command id
+ * @context: see &enum iwl_rx_handler_context
+ * @fn: the function called when the notification is received
+ */
 struct iwl_rx_handlers {
        u16 cmd_id;
-       bool async;
+       enum iwl_rx_handler_context context;
        void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
-#define RX_HANDLER(_cmd_id, _fn, _async)       \
-       { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
-#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)        \
-       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
+#define RX_HANDLER(_cmd_id, _fn, _context)     \
+       { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)      \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
 
 /*
  * Handlers for fw notifications
  * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
  * This list should be in order of frequency for performance purposes.
  *
- * The handler can be SYNC - this means that it will be called in the Rx path
- * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
- * only in this case!), it should be set as ASYNC. In that case, it will be
- * called from a worker with mvm->mutex held.
+ * The handler can be one of three contexts, see &enum iwl_rx_handler_context
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
-       RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
-
-       RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
-       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
-       RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+       RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
+       RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+
+       RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+                  RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+                  RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+                  RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
-                  iwl_mvm_rx_ant_coupling_notif, true),
+                  iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED),
 
        RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
-                  iwl_mvm_window_status_notif, false),
+                  iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
 
-       RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
-       RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
+       RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
+                  RX_HANDLER_SYNC),
+       RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
+                  RX_HANDLER_ASYNC_LOCKED),
 
-       RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+       RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
 
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
-                  iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
+                  iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
-                  iwl_mvm_rx_lmac_scan_complete_notif, true),
+                  iwl_mvm_rx_lmac_scan_complete_notif,
+                  RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
-                  false),
+                  RX_HANDLER_SYNC),
        RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
-                  true),
+                  RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
-                  iwl_mvm_rx_umac_scan_iter_complete_notif, false),
+                  iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
 
-       RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+       RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
+                  RX_HANDLER_SYNC),
 
        RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
-                  false),
+                  RX_HANDLER_SYNC),
 
-       RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+       RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
        RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
-                  iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
-       RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+                  iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
+       RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+                  RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-                      iwl_mvm_temp_notif, true),
+                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
-                      iwl_mvm_ct_kill_notif, false),
+                      iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
 
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
-                  true),
-       RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
-       RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
+                  RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
+                  RX_HANDLER_SYNC),
+       RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
+                  RX_HANDLER_ASYNC_LOCKED),
        RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
-                      iwl_mvm_rx_stored_beacon_notif, false),
+                      iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
        RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
-                      iwl_mvm_mu_mimo_grp_notif, false),
-
+                      iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -611,9 +639,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
                 mvm->cfg->name, mvm->trans->hw_rev);
 
-       min_backoff = calc_min_backoff(trans, cfg);
-       iwl_mvm_thermal_initialize(mvm, min_backoff);
-
        if (iwlwifi_mod_params.nvm_file)
                mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
        else
@@ -666,6 +691,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (err)
                goto out_free;
 
+       min_backoff = calc_min_backoff(trans, cfg);
+       iwl_mvm_thermal_initialize(mvm, min_backoff);
+
        err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
        if (err)
                goto out_unregister;
@@ -743,6 +771,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 struct iwl_async_handler_entry {
        struct list_head list;
        struct iwl_rx_cmd_buffer rxb;
+       enum iwl_rx_handler_context context;
        void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
@@ -769,7 +798,6 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        INIT_LIST_HEAD(&local_list);
 
        /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-       mutex_lock(&mvm->mutex);
 
        /*
         * Sync with Rx path with a lock. Remove all the entries from this list,
@@ -780,12 +808,15 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
+               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+                       mutex_lock(&mvm->mutex);
                entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
+               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+                       mutex_unlock(&mvm->mutex);
                kfree(entry);
        }
-       mutex_unlock(&mvm->mutex);
 }
 
 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
@@ -842,7 +873,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
                if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                        continue;
 
-               if (!rx_h->async) {
+               if (rx_h->context == RX_HANDLER_SYNC) {
                        rx_h->fn(mvm, rxb);
                        return;
                }
@@ -856,6 +887,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
                entry->rxb._offset = rxb->_offset;
                entry->rxb._rx_page_order = rxb->_rx_page_order;
                entry->fn = rx_h->fn;
+               entry->context = rx_h->context;
                spin_lock(&mvm->async_handlers_lock);
                list_add_tail(&entry->list, &mvm->async_handlers_list);
                spin_unlock(&mvm->async_handlers_lock);
index cd6ca37..9a54f2d 100644 (file)
@@ -519,6 +519,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        rcu_read_unlock();
                        return;
                }
+
+               /*
+                * Our hardware de-aggregates AMSDUs but copies the mac header
+                * as-is to the de-aggregated MPDUs. We need to turn off the
+                * AMSDU bit in the QoS control ourselves.
+                */
+               if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+                   !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+                       u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+                       *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+               }
        }
 
        /*
index 924dd6a..2c12789 100644 (file)
@@ -371,20 +371,13 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
 
        iwl_mvm_te_check_trigger(mvm, notif, te_data);
 
-       if (!le32_to_cpu(notif->status)) {
-               IWL_DEBUG_TE(mvm,
-                            "ERROR: Aux ROC Time Event %s notification failure\n",
-                            (le32_to_cpu(notif->action) &
-                             TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
-               return -EINVAL;
-       }
-
        IWL_DEBUG_TE(mvm,
-                    "Aux ROC time event notification  - UID = 0x%x action %d\n",
+                    "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
                     le32_to_cpu(notif->unique_id),
-                    le32_to_cpu(notif->action));
+                    le32_to_cpu(notif->action), le32_to_cpu(notif->status));
 
-       if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+       if (!le32_to_cpu(notif->status) ||
+           le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
                /* End TE, notify mac80211 */
                ieee80211_remain_on_channel_expired(mvm->hw);
                iwl_mvm_roc_finished(mvm); /* flush aux queue */
index 99d9a35..3d2e8b6 100644 (file)
  * needed by the driver.
  */
 
-#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600
 #define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
 
 /**
index 999bcb8..f1f2825 100644 (file)
@@ -211,10 +211,14 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
         * the firmware and hence to take the mutex.
         * Avoid the deadlock by unlocking the mutex here.
         */
-       mutex_unlock(&mvm->mutex);
-       thermal_notify_framework(mvm->tz_device.tzone,
-                                mvm->tz_device.fw_trips_index[ths_crossed]);
-       mutex_lock(&mvm->mutex);
+       if (mvm->tz_device.tzone) {
+               struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
+
+               mutex_unlock(&mvm->mutex);
+               thermal_notify_framework(tz_dev->tzone,
+                                        tz_dev->fw_trips_index[ths_crossed]);
+               mutex_lock(&mvm->mutex);
+       }
 #endif /* CONFIG_THERMAL */
 }
 
@@ -506,6 +510,74 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
        .support_tx_backoff = true,
 };
 
+/* budget in mWatt */
+static const u32 iwl_mvm_cdev_budgets[] = {
+       2000,   /* cooling state 0 */
+       1800,   /* cooling state 1 */
+       1600,   /* cooling state 2 */
+       1400,   /* cooling state 3 */
+       1200,   /* cooling state 4 */
+       1000,   /* cooling state 5 */
+       900,    /* cooling state 6 */
+       800,    /* cooling state 7 */
+       700,    /* cooling state 8 */
+       650,    /* cooling state 9 */
+       600,    /* cooling state 10 */
+       550,    /* cooling state 11 */
+       500,    /* cooling state 12 */
+       450,    /* cooling state 13 */
+       400,    /* cooling state 14 */
+       350,    /* cooling state 15 */
+       300,    /* cooling state 16 */
+       250,    /* cooling state 17 */
+       200,    /* cooling state 18 */
+       150,    /* cooling state 19 */
+};
+
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
+{
+       struct iwl_mvm_ctdp_cmd cmd = {
+               .operation = cpu_to_le32(op),
+               .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
+               .window_size = 0,
+       };
+       int ret;
+       u32 status;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+                                                      CTDP_CONFIG_CMD),
+                                         sizeof(cmd), &cmd, &status);
+
+       if (ret) {
+               IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+               return ret;
+       }
+
+       switch (op) {
+       case CTDP_CMD_OPERATION_START:
+#ifdef CONFIG_THERMAL
+               mvm->cooling_dev.cur_state = state;
+#endif /* CONFIG_THERMAL */
+               break;
+       case CTDP_CMD_OPERATION_REPORT:
+               IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+               /* when the function is called with CTDP_CMD_OPERATION_REPORT
+                * option the function should return the average budget value
+                * that is received from the FW.
+                * The budget can't be less or equal to 0, so it's possible
+                * to distinguish between error values and budgets.
+                */
+               return status;
+       case CTDP_CMD_OPERATION_STOP:
+               IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n");
+               break;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_THERMAL
 static int compare_temps(const void *a, const void *b)
 {
@@ -520,16 +592,20 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (!mvm->tz_device.tzone)
+               return -EINVAL;
+
        /* The driver holds array of temperature trips that are unsorted
         * and uncompressed, the FW should get it compressed and sorted
         */
 
        /* compress temp_trips to cmd array, remove uninitialized values*/
-       for (i = 0; i < IWL_MAX_DTS_TRIPS; i++)
+       for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
                if (mvm->tz_device.temp_trips[i] != S16_MIN) {
                        cmd.thresholds[idx++] =
                                cpu_to_le16(mvm->tz_device.temp_trips[i]);
                }
+       }
        cmd.num_temps = cpu_to_le32(idx);
 
        if (!idx)
@@ -696,6 +772,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
                IWL_DEBUG_TEMP(mvm,
                               "Failed to register to thermal zone (err = %ld)\n",
                               PTR_ERR(mvm->tz_device.tzone));
+               mvm->tz_device.tzone = NULL;
                return;
        }
 
@@ -706,59 +783,6 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
                mvm->tz_device.temp_trips[i] = S16_MIN;
 }
 
-static const u32 iwl_mvm_cdev_budgets[] = {
-       2000,   /* cooling state 0 */
-       1800,   /* cooling state 1 */
-       1600,   /* cooling state 2 */
-       1400,   /* cooling state 3 */
-       1200,   /* cooling state 4 */
-       1000,   /* cooling state 5 */
-       900,    /* cooling state 6 */
-       800,    /* cooling state 7 */
-       700,    /* cooling state 8 */
-       650,    /* cooling state 9 */
-       600,    /* cooling state 10 */
-       550,    /* cooling state 11 */
-       500,    /* cooling state 12 */
-       450,    /* cooling state 13 */
-       400,    /* cooling state 14 */
-       350,    /* cooling state 15 */
-       300,    /* cooling state 16 */
-       250,    /* cooling state 17 */
-       200,    /* cooling state 18 */
-       150,    /* cooling state 19 */
-};
-
-int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget)
-{
-       struct iwl_mvm_ctdp_cmd cmd = {
-               .operation = cpu_to_le32(op),
-               .budget = cpu_to_le32(budget),
-               .window_size = 0,
-       };
-       int ret;
-       u32 status;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
-                                                      CTDP_CONFIG_CMD),
-                                         sizeof(cmd), &cmd, &status);
-
-       if (ret) {
-               IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
-               return ret;
-       }
-
-       if (op == CTDP_CMD_OPERATION_START)
-               mvm->cooling_dev.cur_state = budget;
-
-       else if (op == CTDP_CMD_OPERATION_REPORT)
-               IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
-
-       return 0;
-}
-
 static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
                                       unsigned long *state)
 {
@@ -776,6 +800,7 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
                return -EBUSY;
 
        *state = mvm->cooling_dev.cur_state;
+
        return 0;
 }
 
@@ -799,7 +824,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        }
 
        ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
-                                  iwl_mvm_cdev_budgets[new_state]);
+                                  new_state);
 
 unlock:
        mutex_unlock(&mvm->mutex);
@@ -812,15 +837,12 @@ static struct thermal_cooling_device_ops tcooling_ops = {
        .set_cur_state = iwl_mvm_tcool_set_cur_state,
 };
 
-int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
 {
        char name[] = "iwlwifi";
 
-       if (!iwl_mvm_is_ctdp_supported(mvm)) {
-               mvm->cooling_dev.cdev = NULL;
-
-               return 0;
-       }
+       if (!iwl_mvm_is_ctdp_supported(mvm))
+               return;
 
        BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
 
@@ -833,34 +855,29 @@ int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
                IWL_DEBUG_TEMP(mvm,
                               "Failed to register to cooling device (err = %ld)\n",
                               PTR_ERR(mvm->cooling_dev.cdev));
-               return PTR_ERR(mvm->cooling_dev.cdev);
+               mvm->cooling_dev.cdev = NULL;
+               return;
        }
-
-       return 0;
 }
 
 static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
 {
-       if (!iwl_mvm_is_tt_in_fw(mvm))
+       if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone)
                return;
 
-       if (mvm->tz_device.tzone) {
-               IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-               thermal_zone_device_unregister(mvm->tz_device.tzone);
-               mvm->tz_device.tzone = NULL;
-       }
+       IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+       thermal_zone_device_unregister(mvm->tz_device.tzone);
+       mvm->tz_device.tzone = NULL;
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
 {
-       if (!iwl_mvm_is_ctdp_supported(mvm))
+       if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev)
                return;
 
-       if (mvm->cooling_dev.cdev) {
-               IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-               thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-               mvm->cooling_dev.cdev = NULL;
-       }
+       IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+       thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+       mvm->cooling_dev.cdev = NULL;
 }
 #endif /* CONFIG_THERMAL */
 
index 271e8da..75870e6 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -963,6 +964,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        struct sk_buff_head skbs;
        u8 skb_freed = 0;
        u16 next_reclaimed, seq_ctl;
+       bool is_ndp = false;
 
        __skb_queue_head_init(&skbs);
 
@@ -1016,6 +1018,20 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
                }
 
+               if (unlikely(!seq_ctl)) {
+                       struct ieee80211_hdr *hdr = (void *)skb->data;
+
+                       /*
+                        * If it is an NDP, we can't update next_reclaimed since
+                        * its sequence control is 0. Note that for that same
+                        * reason, NDPs are never sent to A-MPDU'able queues
+                        * so that we can never have more than one freed frame
+                        * for a single Tx response (see WARN_ON below).
+                        */
+                       if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+                               is_ndp = true;
+               }
+
                /*
                 * TODO: this is not accurate if we are freeing more than one
                 * packet.
@@ -1079,9 +1095,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        bool send_eosp_ndp = false;
 
                        spin_lock_bh(&mvmsta->lock);
-                       tid_data->next_reclaimed = next_reclaimed;
-                       IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
-                                          next_reclaimed);
+                       if (!is_ndp) {
+                               tid_data->next_reclaimed = next_reclaimed;
+                               IWL_DEBUG_TX_REPLY(mvm,
+                                                  "Next reclaimed packet:%d\n",
+                                                  next_reclaimed);
+                       } else {
+                               IWL_DEBUG_TX_REPLY(mvm,
+                                                  "NDP - don't update next_reclaimed\n");
+                       }
+
                        iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
                        if (mvmsta->sleep_tx_count) {
index 59453c1..53cdc57 100644 (file)
@@ -376,8 +376,8 @@ struct iwl_error_event_table_v1 {
 struct iwl_error_event_table {
        u32 valid;              /* (nonzero) valid, (0) log is empty */
        u32 error_id;           /* type of error */
-       u32 pc;                 /* program counter */
-       u32 blink1;             /* branch link */
+       u32 trm_hw_status0;     /* TRM HW status */
+       u32 trm_hw_status1;     /* TRM HW status */
        u32 blink2;             /* branch link */
        u32 ilink1;             /* interrupt link */
        u32 ilink2;             /* interrupt link */
@@ -389,7 +389,7 @@ struct iwl_error_event_table {
        u32 tsf_hi;             /* network timestamp function timer */
        u32 gp1;                /* GP1 timer register */
        u32 gp2;                /* GP2 timer register */
-       u32 gp3;                /* GP3 timer register */
+       u32 fw_rev_type;        /* firmware revision type */
        u32 major;              /* uCode version major */
        u32 minor;              /* uCode version minor */
        u32 hw_ver;             /* HW Silicon version */
@@ -408,7 +408,7 @@ struct iwl_error_event_table {
                                 * time_flag */
        u32 isr4;               /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
-       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 last_cmd_id;        /* last HCMD id handled by the firmware */
        u32 wait_event;         /* wait event() caller address */
        u32 l2p_control;        /* L2pControlField */
        u32 l2p_duration;       /* L2pDurationField */
@@ -419,7 +419,7 @@ struct iwl_error_event_table {
        u32 u_timestamp;        /* indicate when the date and time of the
                                 * compilation */
        u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
 
 /*
  * UMAC error struct - relevant starting from family 8000 chip.
@@ -529,9 +529,9 @@ static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
 
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
                                      table.data1, table.data2, table.data3,
-                                     table.blink1, table.blink2, table.ilink1,
-                                     table.ilink2, table.bcon_time, table.gp1,
-                                     table.gp2, table.gp3, table.ucode_ver, 0,
+                                     table.blink2, table.ilink1, table.ilink2,
+                                     table.bcon_time, table.gp1, table.gp2,
+                                     table.gp3, table.ucode_ver, 0,
                                      table.hw_ver, table.brd_ver);
        IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
@@ -615,14 +615,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
                                      table.data1, table.data2, table.data3,
-                                     table.blink1, table.blink2, table.ilink1,
+                                     table.blink2, table.ilink1,
                                      table.ilink2, table.bcon_time, table.gp1,
-                                     table.gp2, table.gp3, table.major,
+                                     table.gp2, table.fw_rev_type, table.major,
                                      table.minor, table.hw_ver, table.brd_ver);
        IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
-       IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
-       IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+       IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+       IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
        IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
        IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
        IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
@@ -634,7 +634,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
        IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
        IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-       IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+       IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
        IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
        IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
        IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
@@ -645,7 +645,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
        IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
        IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-       IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+       IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
        IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
        IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
        IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
index d33b6ba..05b9685 100644 (file)
@@ -631,13 +631,31 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* if RTPM is in use, enable it in our device */
        if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+               /* We explicitly set the device to active here to
+                * clear contingent errors.
+                */
                pm_runtime_set_active(&pdev->dev);
+
                pm_runtime_set_autosuspend_delay(&pdev->dev,
                                         iwlwifi_mod_params.d0i3_entry_delay);
                pm_runtime_use_autosuspend(&pdev->dev);
+
+               /* We are not supposed to call pm_runtime_allow() by
+                * ourselves, but let userspace enable runtime PM via
+                * sysfs.  However, since we don't enable this from
+                * userspace yet, we need to allow/forbid() ourselves.
+                */
                pm_runtime_allow(&pdev->dev);
        }
 
+       /* The PCI device starts with a reference taken and we are
+        * supposed to release it here.  But to simplify the
+        * interaction with the opmode, we don't do it now, but let
+        * the opmode release it when it's ready.  To account for this
+        * reference, we start with ref_count set to 1.
+        */
+       trans_pcie->ref_count = 1;
+
        return 0;
 
 out_free_drv:
@@ -652,7 +670,17 @@ static void iwl_pci_remove(struct pci_dev *pdev)
        struct iwl_trans *trans = pci_get_drvdata(pdev);
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       /* if RTPM was in use, restore it to the state before probe */
+       if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+               /* We should not call forbid here, but we do for now.
+                * Check the comment to pm_runtime_allow() in
+                * iwl_pci_probe().
+                */
+               pm_runtime_forbid(trans->dev);
+       }
+
        iwl_drv_stop(trans_pcie->drv);
+
        iwl_trans_pcie_free(trans);
 }
 
index 6677f31..dadafbd 100644 (file)
@@ -347,7 +347,7 @@ struct iwl_tso_hdr_page {
  */
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
-       struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE];
+       struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
        struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
index 489b07a..4be3c35 100644 (file)
@@ -231,6 +231,9 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
        }
 }
 
+/*
+ * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
+ */
 static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
 {
@@ -277,17 +280,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
 }
 
 /*
- * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
+ * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
  */
-static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
+                                   struct iwl_rxq *rxq)
 {
        struct iwl_rx_mem_buffer *rxb;
 
@@ -331,6 +327,26 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
        }
 }
 
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static
+void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+       if (trans->cfg->mq_rx_supported)
+               iwl_pcie_rxq_mq_restock(trans, rxq);
+       else
+               iwl_pcie_rxq_sq_restock(trans, rxq);
+}
+
 /*
  * iwl_pcie_rx_alloc_page - allocates and returns a page.
  *
@@ -434,7 +450,7 @@ static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;
 
-       for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
+       for (i = 0; i < RX_POOL_SIZE; i++) {
                if (!trans_pcie->rx_pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
@@ -539,40 +555,46 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 }
 
 /*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
  *
  * Called by queue when the queue posted allocation request and
  * has freed 8 RBDs in order to restock itself.
+ * This function directly moves the allocated RBs to the queue's ownership
+ * and updates the relevant counters.
  */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-                                    struct iwl_rx_mem_buffer
-                                    *out[RX_CLAIM_REQ_ALLOC])
+static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+                                     struct iwl_rxq *rxq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;
 
+       lockdep_assert_held(&rxq->lock);
+
        /*
         * atomic_dec_if_positive returns req_ready - 1 for any scenario.
         * If req_ready is 0 atomic_dec_if_positive will return -1 and this
-        * function will return -ENOMEM, as there are no ready requests.
+        * function will return early, as there are no ready requests.
         * atomic_dec_if_positive will perform the *actual* decrement only if
         * req_ready > 0, i.e. - there are ready requests and the function
         * hands one request to the caller.
         */
        if (atomic_dec_if_positive(&rba->req_ready) < 0)
-               return -ENOMEM;
+               return;
 
        spin_lock(&rba->lock);
        for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
                /* Get next free Rx buffer, remove it from free list */
-               out[i] = list_first_entry(&rba->rbd_allocated,
-                              struct iwl_rx_mem_buffer, list);
-               list_del(&out[i]->list);
+               struct iwl_rx_mem_buffer *rxb =
+                       list_first_entry(&rba->rbd_allocated,
+                                        struct iwl_rx_mem_buffer, list);
+
+               list_move(&rxb->list, &rxq->rx_free);
        }
        spin_unlock(&rba->lock);
 
-       return 0;
+       rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+       rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
 static void iwl_pcie_rx_allocator_work(struct work_struct *data)
@@ -795,11 +817,10 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 
        /*
         * Activate DMA snooping.
-        * Set RX DMA chunk size to 128 bit
+        * Set RX DMA chunk size to 64B
         * Default queue is 0
         */
        iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-                      RFH_GEN_CFG_RB_CHUNK_SIZE |
                       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
                       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
        /* Enable the relevant rx queues */
@@ -830,7 +851,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i, err, num_rbds, allocator_pool_size;
+       int i, err, queue_size, allocator_pool_size, num_alloc;
 
        if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
@@ -882,11 +903,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        }
 
        /* move the pool to the default queue and allocator ownerships */
-       num_rbds = trans->cfg->mq_rx_supported ?
-                    MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
+       queue_size = trans->cfg->mq_rx_supported ?
+                    MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
-       for (i = 0; i < num_rbds; i++) {
+       num_alloc = queue_size + allocator_pool_size;
+       for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
                if (i < allocator_pool_size)
@@ -901,7 +923,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        if (trans->cfg->mq_rx_supported) {
                iwl_pcie_rx_mq_hw_init(trans);
        } else {
-               iwl_pcie_rxq_restock(trans, def_rxq);
+               iwl_pcie_rxq_sq_restock(trans, def_rxq);
                iwl_pcie_rx_hw_init(trans, def_rxq);
        }
 
@@ -1149,7 +1171,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
-       u32 r, i, j, count = 0;
+       u32 r, i, count = 0;
        bool emergency = false;
 
 restart:
@@ -1193,62 +1215,36 @@ restart:
 
                i = (i + 1) & (rxq->queue_size - 1);
 
-               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-                * try to claim the pre-allocated buffers from the allocator */
-               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+               /*
+                * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+                * try to claim the pre-allocated buffers from the allocator.
+                * If not ready - will try to reclaim next time.
+                * There is no need to reschedule work - allocator exits only
+                * on success
+                */
+               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
+                       iwl_pcie_rx_allocator_get(trans, rxq);
+
+               if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
                        struct iwl_rb_allocator *rba = &trans_pcie->rba;
-                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-                       if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
-                           !emergency) {
-                               /* Add the remaining 6 empty RBDs
-                               * for allocator use
-                                */
-                               spin_lock(&rba->lock);
-                               list_splice_tail_init(&rxq->rx_used,
-                                                     &rba->rbd_empty);
-                               spin_unlock(&rba->lock);
-                       }
 
-                       /* If not ready - continue, will try to reclaim later.
-                       * No need to reschedule work - allocator exits only on
-                       * success */
-                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
-                               /* If success - then RX_CLAIM_REQ_ALLOC
-                                * buffers were retrieved and should be added
-                                * to free list */
-                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-                                       list_add_tail(&out[j]->list,
-                                                     &rxq->rx_free);
-                                       rxq->free_count++;
-                               }
-                       }
-               }
-               if (emergency) {
+                       /* Add the remaining empty RBDs for allocator use */
+                       spin_lock(&rba->lock);
+                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+                       spin_unlock(&rba->lock);
+               } else if (emergency) {
                        count++;
                        if (count == 8) {
                                count = 0;
                                if (rxq->used_count < rxq->queue_size / 3)
                                        emergency = false;
+
+                               rxq->read = i;
                                spin_unlock(&rxq->lock);
                                iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
-                               spin_lock(&rxq->lock);
-                       }
-               }
-               /* handle restock for three cases, can be all of them at once:
-               * - we just pulled buffers from the allocator
-               * - we have 8+ unstolen pages accumulated
-               * - we are in emergency and allocated buffers
-                */
-               if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
-                       rxq->read = i;
-                       spin_unlock(&rxq->lock);
-                       if (trans->cfg->mq_rx_supported)
-                               iwl_pcie_rxq_mq_restock(trans, rxq);
-                       else
                                iwl_pcie_rxq_restock(trans, rxq);
-                       goto restart;
+                               goto restart;
+                       }
                }
        }
 out:
@@ -1273,6 +1269,8 @@ out:
 
        if (rxq->napi.poll)
                napi_gro_flush(&rxq->napi, false);
+
+       iwl_pcie_rxq_restock(trans, rxq);
 }
 
 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
index e67957d..eb39c7e 100644 (file)
@@ -1646,9 +1646,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        trans->command_groups = trans_cfg->command_groups;
        trans->command_groups_size = trans_cfg->command_groups_size;
 
-       /* init ref_count to 1 (should be cleared when ucode is loaded) */
-       trans_pcie->ref_count = 1;
-
        /* Initialize NAPI here - it should be before registering to mac80211
         * in the opmode but after the HW struct is allocated.
         * As this function may be called again in some corner cases don't
@@ -1663,9 +1660,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;
 
-       /* TODO: check if this is really needed */
-       pm_runtime_disable(trans->dev);
-
        iwl_pcie_synchronize_irqs(trans);
 
        iwl_pcie_tx_free(trans);