Merge tag 'iwlwifi-next-for-kalle-2015-10-05' of git://git.kernel.org/pub/scm/linux...
author Kalle Valo <kvalo@codeaurora.org>
Wed, 7 Oct 2015 09:14:23 +0000 (12:14 +0300)
committer Kalle Valo <kvalo@codeaurora.org>
Wed, 7 Oct 2015 09:14:23 +0000 (12:14 +0300)
* more clean-ups towards multiple RX queues;
* some rate scaling fixes and improvements;
* some time-of-flight fixes;
* other generic improvements and clean-ups;

32 files changed:
MAINTAINERS
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-trans.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/tof.c
drivers/net/wireless/iwlwifi/mvm/tof.h
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 310da42..77b2728 100644
@@ -5541,7 +5541,7 @@ F:        drivers/net/wireless/iwlegacy/
 INTEL WIRELESS WIFI LINK (iwlwifi)
 M:     Johannes Berg <johannes.berg@intel.com>
 M:     Emmanuel Grumbach <emmanuel.grumbach@intel.com>
-M:     Intel Linux Wireless <ilw@linux.intel.com>
+M:     Intel Linux Wireless <linuxwifi@intel.com>
 L:     linux-wireless@vger.kernel.org
 W:     http://intellinuxwireless.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index aba0957..6e949df 100644
@@ -142,6 +142,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
 config IWLWIFI_DEVICE_TRACING
        bool "iwlwifi device access tracing"
        depends on EVENT_TRACING
+       default y
        help
          Say Y here to trace all commands, including TX frames and IO
          accesses, sent to the device. If you say yes, iwlwifi will
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 5b25f36..d561181 100644
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   13
-#define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  13
-#define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
@@ -269,11 +267,6 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3165",
        .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
-       /* sparse doens't like the re-assignment but it is safe */
-#ifndef __CHECKER__
-       .ucode_api_ok = IWL3165_UCODE_API_OK,
-       .ucode_api_min = IWL3165_UCODE_API_MIN,
-#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 939fa22..9109708 100644
@@ -223,13 +223,13 @@ struct iwl_tt_tx_backoff {
  * @support_tx_backoff: Support tx-backoff?
  */
 struct iwl_tt_params {
-       s32 ct_kill_entry;
-       s32 ct_kill_exit;
+       u32 ct_kill_entry;
+       u32 ct_kill_exit;
        u32 ct_kill_duration;
-       s32 dynamic_smps_entry;
-       s32 dynamic_smps_exit;
-       s32 tx_protection_entry;
-       s32 tx_protection_exit;
+       u32 dynamic_smps_entry;
+       u32 dynamic_smps_exit;
+       u32 tx_protection_entry;
+       u32 tx_protection_exit;
        struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
        bool support_ct_kill;
        bool support_dynamic_smps;
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index a86aa5b..463cadf 100644
@@ -450,7 +450,7 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
        u32 api_flags = le32_to_cpu(ucode_api->api_flags);
        int i;
 
-       if (api_index >= IWL_API_MAX_BITS / 32) {
+       if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
                /* don't return an error so we can load FW that has more bits */
                return 0;
@@ -472,7 +472,7 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
        u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
        int i;
 
-       if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
+       if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
                /* don't return an error so we can load FW that has more bits */
                return 0;
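
The new bound is simply the number of 32-bit words needed to hold all defined TLV bits, so the check no longer has to be kept in sync with a hand-maintained *_MAX_BITS constant. A minimal standalone sketch of the same arithmetic (the 41-bit count is an arbitrary example, not a value from the driver):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define NUM_EXAMPLE_BITS    41  /* pretend the highest TLV bit is 40 */

    int main(void)
    {
            /* 41 bits need two 32-bit words, so only indices 0 and 1 are valid */
            for (unsigned int api_index = 0; api_index < 4; api_index++)
                    printf("api_index %u in range: %d\n", api_index,
                           api_index < DIV_ROUND_UP(NUM_EXAMPLE_BITS, 32));
            return 0;
    }
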
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 352d245..72ddd4a 100644
@@ -254,6 +254,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  *     instead of 3.
  * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
  *     (command version 3) that supports per-chain limits
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
@@ -264,6 +266,12 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
        IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
        IWL_UCODE_TLV_API_TX_POWER_CHAIN        = (__force iwl_ucode_tlv_api_t)27,
+
+       NUM_IWL_UCODE_TLV_API
+#ifdef __CHECKER__
+               /* sparse says it cannot increment the previous enum member */
+               = 128
+#endif
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -298,6 +306,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *     is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ *
+ * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
@@ -320,6 +330,12 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
        IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT                = (__force iwl_ucode_tlv_capa_t)31,
+
+       NUM_IWL_UCODE_TLV_CAPA
+#ifdef __CHECKER__
+               /* sparse says it cannot increment the previous enum member */
+               = 128
+#endif
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -330,9 +346,6 @@ enum iwl_ucode_tlv_capa {
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH   200
 
-#define IWL_API_MAX_BITS               64
-#define IWL_CAPABILITIES_MAX_BITS      64
-
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 45e7321..84ec0ce 100644
@@ -105,8 +105,8 @@ struct iwl_ucode_capabilities {
        u32 n_scan_channels;
        u32 standard_phy_calibration_size;
        u32 flags;
-       unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
-       unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
+       unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+       unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
 };
 
 static inline bool
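
NUM_IWL_UCODE_TLV_API and NUM_IWL_UCODE_TLV_CAPA are plain counting sentinels: each evaluates to one past the highest bit in use, so BITS_TO_LONGS() sizes the _api/_capa bitmaps exactly and grows them automatically when new TLV bits are added (the #ifdef __CHECKER__ fallback only exists because sparse cannot increment the __bitwise__ enum members). A standalone sketch of the pattern with illustrative names:

    #include <stdio.h>

    #define BITS_PER_LONG       (8 * sizeof(long))
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define BITS_TO_LONGS(nr)   DIV_ROUND_UP(nr, BITS_PER_LONG)

    enum example_tlv_capa {
            EXAMPLE_CAPA_D0I3  = 0,
            EXAMPLE_CAPA_GSCAN = 31,
            NUM_EXAMPLE_CAPA        /* counting sentinel: == 32 here */
    };

    int main(void)
    {
            unsigned long capa[BITS_TO_LONGS(NUM_EXAMPLE_CAPA)] = { 0 };

            /* set and test one capability bit, as fw_has_capa()-style helpers do */
            capa[EXAMPLE_CAPA_GSCAN / BITS_PER_LONG] |=
                    1UL << (EXAMPLE_CAPA_GSCAN % BITS_PER_LONG);
            printf("bitmap words: %zu, gscan set: %d\n",
                   sizeof(capa) / sizeof(capa[0]),
                   !!(capa[EXAMPLE_CAPA_GSCAN / BITS_PER_LONG] &
                      (1UL << (EXAMPLE_CAPA_GSCAN % BITS_PER_LONG))));
            return 0;
    }
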
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 3b8e85e..d829849 100644
@@ -580,13 +580,15 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        IWL_ERR_DEV(dev, "mac address is not found\n");
 }
 
+#define IWL_4165_DEVICE_ID 0x5501
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1)
+                  u32 mac_addr0, u32 mac_addr1, u32 hw_id)
 {
        struct iwl_nvm_data *data;
        u32 sku;
@@ -625,6 +627,17 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                                    (sku & NVM_SKU_CAP_11AC_ENABLE);
        data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
+       /*
+        * OTP 0x52 bug work around
+        * define antenna 1x1 according to MIMO disabled
+        */
+       if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) {
+               data->valid_tx_ant = ANT_B;
+               data->valid_rx_ant = ANT_B;
+               tx_chains = ANT_B;
+               rx_chains = ANT_B;
+       }
+
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 822ba52..9f44d81 100644
@@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1);
+                  u32 mac_addr0, u32 mac_addr1, u32 hw_id);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b47fe9d..2a58d68 100644
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,7 +110,8 @@ struct iwl_cfg;
  * interact with it. The driver layer typically calls the start and stop
  * handlers, the transport layer calls the others.
  *
- * All the handlers MUST be implemented
+ * All the handlers MUST be implemented, except @rx_rss which can be left
+ * out *iff* the opmode will never run on hardware with multi-queue capability.
  *
  * @start: start the op_mode. The transport layer is already allocated.
  *     May sleep
@@ -116,6 +119,10 @@ struct iwl_cfg;
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *     HCMD this Rx responds to. Can't sleep.
+ * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
+ *     received on the RSS queue(s). The queue parameter indicates which of the
+ *     RSS queues received this frame; it will always be non-zero.
+ *     This method must not sleep.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -146,6 +153,8 @@ struct iwl_op_mode_ops {
        void (*stop)(struct iwl_op_mode *op_mode);
        void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
                   struct iwl_rx_cmd_buffer *rxb);
+       void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                      struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
        bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -186,6 +195,14 @@ static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
        return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
+static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
+                                     struct napi_struct *napi,
+                                     struct iwl_rx_cmd_buffer *rxb,
+                                     unsigned int queue)
+{
+       op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
+}
+
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
                                          int queue)
 {
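
To see how the two RX hooks relate: the default queue keeps using ->rx(), while any non-zero RSS queue would be steered through the new ->rx_rss() wrapper. The helper below is purely illustrative (it is not part of this series and the transport's real dispatch path may differ), but it only uses the signatures declared above:

    /* Illustrative only: queue 0 stays on the classic ->rx() path,
     * any non-zero RSS queue goes through ->rx_rss(). */
    static void example_rx_dispatch(struct iwl_op_mode *op_mode,
                                    struct napi_struct *napi,
                                    struct iwl_rx_cmd_buffer *rxb,
                                    unsigned int queue)
    {
            if (queue == 0)
                    iwl_op_mode_rx(op_mode, napi, rxb);            /* default RX queue */
            else
                    iwl_op_mode_rx_rss(op_mode, napi, rxb, queue); /* RSS queues, always > 0 */
    }
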
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 9f8bcef..7161096 100644
@@ -87,6 +87,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
        trans->cfg = cfg;
        trans->ops = ops;
        trans->dev_cmd_headroom = dev_cmd_headroom;
+       trans->num_rx_queues = 1;
 
        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c829c50..bb51b6f 100644
@@ -386,6 +386,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
 #define IWL_MAX_HW_QUEUES              32
 #define IWL_MAX_TID_COUNT      8
 #define IWL_FRAME_LIMIT        64
+#define IWL_MAX_RX_HW_QUEUES   16
 
 /**
  * enum iwl_wowlan_status - WoWLAN image/device status
@@ -654,6 +655,8 @@ enum iwl_d0i3_mode {
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
  * @ltr_enabled: set to true if the LTR is enabled
+ * @num_rx_queues: number of RX queues allocated by the transport;
+ *     the transport must set this before calling iwl_drv_start()
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -693,6 +696,8 @@ struct iwl_trans {
        bool pm_support;
        bool ltr_enabled;
 
+       u8 num_rx_queues;
+
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
        size_t dev_cmd_headroom;
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index b8ee312..a3ca6db 100644
 #define IWL_MVM_QUOTA_THRESHOLD                        4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO            0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK       1
 #define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 334ae56..398bef6 100644
@@ -511,7 +511,8 @@ static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
-       int value, ret = -EINVAL;
+       u32 value;
+       int ret = -EINVAL;
        char *data;
 
        mutex_lock(&mvm->mutex);
@@ -599,7 +600,8 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
-       int value, ret = 0;
+       u32 value;
+       int ret = 0;
        char *data;
 
        mutex_lock(&mvm->mutex);
@@ -822,7 +824,8 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
-       int value, ret = 0;
+       u32 value;
+       int ret = 0;
        char *data;
 
        mutex_lock(&mvm->mutex);
@@ -892,6 +895,7 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
                        goto out;
                }
                memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+               goto out;
        }
 
        data = iwl_dbgfs_is_match("macaddr_mask=", buf);
@@ -903,21 +907,22 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
                        goto out;
                }
                memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+               goto out;
        }
 
        data = iwl_dbgfs_is_match("ap=", buf);
        if (data) {
-               struct iwl_tof_range_req_ap_entry ap;
+               struct iwl_tof_range_req_ap_entry ap = {};
                int size = sizeof(struct iwl_tof_range_req_ap_entry);
                u16 burst_period;
                u8 *mac = ap.bssid;
                unsigned int i;
 
-               if (sscanf(data, "%u %hhd %hhx %hhx"
+               if (sscanf(data, "%u %hhd %hhd %hhd"
                           "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
-                          "%hhx %hhx %hx"
-                          "%hhx %hhx %x"
-                          "%hhx %hhx %hhx %hhx",
+                          "%hhd %hhd %hd"
+                          "%hhd %hhd %d"
+                          "%hhx %hhd %hhd %hhd",
                           &i, &ap.channel_num, &ap.bandwidth,
                           &ap.ctrl_ch_position,
                           mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
@@ -944,12 +949,12 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
        data = iwl_dbgfs_is_match("send_range_request=", buf);
        if (data) {
                ret = kstrtou32(data, 10, &value);
-               if (ret == 0 && value) {
+               if (ret == 0 && value)
                        ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
-                       goto out;
-               }
+               goto out;
        }
 
+       ret = -EINVAL;
 out:
        mutex_unlock(&mvm->mutex);
        return ret ?: count;
@@ -994,16 +999,18 @@ static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
                struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
 
                pos += scnprintf(buf + pos, bufsz - pos,
-                               "ap %.2d: channel_num=%hhx bw=%hhx"
-                               " control=%hhx bssid=%pM type=%hhx"
-                               " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
-                               " retries=%hhx tsf_delta=%x location_req=%hhx "
-                               " asap=%hhx enable=%hhx rssi=%hhx\n",
+                               "ap %.2d: channel_num=%hhd bw=%hhd"
+                               " control=%hhd bssid=%pM type=%hhd"
+                               " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
+                               " retries=%hhd tsf_delta=%d"
+                               " tsf_delta_direction=%hhd location_req=0x%hhx "
+                               " asap=%hhd enable=%hhd rssi=%hhd\n",
                                i, ap->channel_num, ap->bandwidth,
                                ap->ctrl_ch_position, ap->bssid,
                                ap->measure_type, ap->num_of_bursts,
                                ap->burst_period, ap->samples_per_burst,
                                ap->retries_per_sample, ap->tsf_delta,
+                               ap->tsf_delta_direction,
                                ap->location_req, ap->asap_mode,
                                ap->enable_dyn_ack, ap->rssi);
        }
@@ -1019,7 +1026,8 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
-       int value, ret = 0;
+       u32 value;
+       int ret = 0;
        char *data;
 
        mutex_lock(&mvm->mutex);
@@ -1071,12 +1079,12 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
        data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
        if (data) {
                ret = kstrtou32(data, 10, &value);
-               if (ret == 0 && value) {
+               if (ret == 0 && value)
                        ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
-                       goto out;
-               }
+               goto out;
        }
 
+       ret = -EINVAL;
 out:
        mutex_unlock(&mvm->mutex);
        return ret ?: count;
@@ -1099,18 +1107,18 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
        mutex_lock(&mvm->mutex);
 
        pos += scnprintf(buf + pos, bufsz - pos,
-                        "tsf_timer_offset_msec = %hx\n",
+                        "tsf_timer_offset_msec = %hd\n",
                         cmd->tsf_timer_offset_msec);
-       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
                         cmd->min_delta_ftm);
        pos += scnprintf(buf + pos, bufsz - pos,
-                        "ftm_format_and_bw20M = %hhx\n",
+                        "ftm_format_and_bw20M = %hhd\n",
                         cmd->ftm_format_and_bw20M);
        pos += scnprintf(buf + pos, bufsz - pos,
-                        "ftm_format_and_bw40M = %hhx\n",
+                        "ftm_format_and_bw40M = %hhd\n",
                         cmd->ftm_format_and_bw40M);
        pos += scnprintf(buf + pos, bufsz - pos,
-                        "ftm_format_and_bw80M = %hhx\n",
+                        "ftm_format_and_bw80M = %hhd\n",
                         cmd->ftm_format_and_bw80M);
 
        mutex_unlock(&mvm->mutex);
@@ -1123,8 +1131,8 @@ static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
-       int value, ret = 0;
-       int abort_id;
+       u32 value;
+       int abort_id, ret = 0;
        char *data;
 
        mutex_lock(&mvm->mutex);
@@ -1205,11 +1213,11 @@ static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
                struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
 
                pos += scnprintf(buf + pos, bufsz - pos,
-                               "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
-                               " rtt=%x rtt_var=%x rtt_spread=%x"
-                               " rssi=%hhx  rssi_spread=%hhx"
-                               " range=%x range_var=%x"
-                               " time_stamp=%x\n",
+                               "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+                               " rtt=%d rtt_var=%d rtt_spread=%d"
+                               " rssi=%hhd  rssi_spread=%hhd"
+                               " range=%d range_var=%d"
+                               " time_stamp=%d\n",
                                i, ap->bssid, ap->measure_status,
                                ap->measure_bw,
                                ap->rtt, ap->rtt_variance, ap->rtt_spread,
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 3b8481f..9b4fbb8 100644
@@ -1495,6 +1495,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        if (!debugfs_create_blob("nvm_prod", S_IRUSR,
                                  mvm->debugfs_dir, &mvm->nvm_prod_blob))
                goto err;
+       if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
+                                mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
+               goto err;
 
        /*
         * Create a symlink with mac80211. It will be removed when mac80211
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 7005fa4..c8f3e25 100644
@@ -192,16 +192,10 @@ struct iwl_powertable_cmd {
 /**
  * enum iwl_device_power_flags - masks for device power command flags
  * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
- *     receiver and transmitter. '0' - does not allow. This flag should be
- *     always set to '1' unless one need to disable actual power down for debug
- *     purposes.
- * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
- *     that power management is disabled. '0' Power management is enabled, one
- *     of power schemes is applied.
+ *     receiver and transmitter. '0' - does not allow.
 */
 enum iwl_device_power_flags {
        DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK   = BIT(0),
-       DEVICE_POWER_FLAGS_CAM_MSK              = BIT(13),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 5e4014a..44ff684 100644
@@ -268,6 +268,16 @@ enum {
        REPLY_MAX = 0xff,
 };
 
+enum iwl_phy_ops_subcmd_ids {
+       CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+       DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+};
+
+/* command groups */
+enum {
+       PHY_OPS_GROUP = 0x4,
+};
+
 /**
  * struct iwl_cmd_response - generic response struct for most commands
  * @status: status of the command asked, changes for each one
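
These are the first "wide" commands, addressed by a (group, opcode) pair instead of a bare 8-bit opcode; the ops.c hunk further down registers a handler for exactly this pair via RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, ...). A standalone sketch of dispatching on such a pair (the 16-bit packing and names are illustrative assumptions, not the driver's internal encoding):

    #include <stdio.h>

    #define EXAMPLE_PHY_OPS_GROUP    0x4
    #define EXAMPLE_DTS_NOTIF_WIDE   0xFF

    /* assumed illustrative packing: group in the high byte, opcode in the low byte */
    #define EXAMPLE_WIDE_ID(grp, cmd)  (((grp) << 8) | (cmd))

    static void handle_dts_notif(void)
    {
            printf("DTS measurement notification (wide)\n");
    }

    int main(void)
    {
            unsigned int id = EXAMPLE_WIDE_ID(EXAMPLE_PHY_OPS_GROUP,
                                              EXAMPLE_DTS_NOTIF_WIDE);

            switch (id) {
            case EXAMPLE_WIDE_ID(EXAMPLE_PHY_OPS_GROUP, EXAMPLE_DTS_NOTIF_WIDE):
                    handle_dts_notif();
                    break;
            default:
                    printf("unhandled command 0x%x\n", id);
            }
            return 0;
    }
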
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 248b025..834641e 100644
@@ -616,12 +616,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
         * will be empty.
         */
 
-       for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-               if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
-                       mvm->queue_to_mac80211[i] = i;
-               else
-                       mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
-       }
+       memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+       mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5af0090..9d36ba7 100644
@@ -486,16 +486,18 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        switch (vif->type) {
        case NL80211_IFTYPE_P2P_DEVICE:
                iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-                                     IWL_MVM_TX_FIFO_VO, wdg_timeout);
+                                     IWL_MVM_OFFCHANNEL_QUEUE,
+                                     IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
                break;
        case NL80211_IFTYPE_AP:
-               iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
-                                     IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+               iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
+                                     IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
                /* fall through */
        default:
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
-                                             iwl_mvm_ac_to_tx_fifo[ac],
+                                             vif->hw_queue[ac],
+                                             iwl_mvm_ac_to_tx_fifo[ac], 0,
                                              wdg_timeout);
                break;
        }
@@ -511,14 +513,19 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        switch (vif->type) {
        case NL80211_IFTYPE_P2P_DEVICE:
-               iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
+               iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+                                   IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
+                                   0);
                break;
        case NL80211_IFTYPE_AP:
-               iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
+               iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+                                   IWL_MAX_TID_COUNT, 0);
                /* fall through */
        default:
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-                       iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
+                       iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
+                                           vif->hw_queue[ac],
+                                           IWL_MAX_TID_COUNT, 0);
        }
 }
 
@@ -1126,9 +1133,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
        ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
 }
 
-int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
-                           struct ieee80211_vif *vif,
-                           u32 action)
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  u32 action)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mac_ctx_cmd cmd = {};
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4c497fe..d7275a5 100644
@@ -2616,7 +2616,7 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
 
        if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
                mvmvif->ap_assoc_sta_count--;
-               iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, FW_CTXT_ACTION_MODIFY);
+               iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
        }
 
        mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index cf1f514..0d3aff1 100644
@@ -82,7 +82,6 @@
 #include "constants.h"
 #include "tof.h"
 
-#define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
@@ -605,7 +604,14 @@ struct iwl_mvm {
                u64 on_time_scan;
        } radio_stats, accu_radio_stats;
 
-       u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+       struct {
+               /* Map to HW queue */
+               u32 hw_queue_to_mac80211;
+               u8 hw_queue_refcount;
+               bool setup_reserved;
+               u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+       } queue_info[IWL_MAX_HW_QUEUES];
+       spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
        const char *nvm_file_name;
@@ -682,6 +688,7 @@ struct iwl_mvm {
        struct debugfs_blob_wrapper nvm_sw_blob;
        struct debugfs_blob_wrapper nvm_calib_blob;
        struct debugfs_blob_wrapper nvm_prod_blob;
+       struct debugfs_blob_wrapper nvm_phy_sku_blob;
 
        struct iwl_mvm_frame_stats drv_rx_stats;
        spinlock_t drv_stats_lock;
@@ -910,6 +917,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
                           IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
        bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -957,6 +970,12 @@ static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
                           IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
 }
 
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+       /* firmware flag isn't defined yet */
+       return false;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -1129,10 +1148,6 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *exclude_vif);
-int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
-                           struct ieee80211_vif *vif,
-                           u32 action);
-
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1345,14 +1360,20 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 }
 
 /* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-                       const struct iwl_trans_txq_scd_cfg *cfg,
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                       u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout);
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
+/*
+ * Disable a TXQ.
+ * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
+ */
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                        u8 tid, u8 flags);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
 
 static inline
-void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
-                          u8 fifo, unsigned int wdg_timeout)
+void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                          u8 fifo, u16 ssn, unsigned int wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -1361,13 +1382,13 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
                .frame_limit = IWL_FRAME_LIMIT,
        };
 
-       iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
+       iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
-                                         int fifo, int sta_id, int tid,
-                                         int frame_limit, u16 ssn,
-                                         unsigned int wdg_timeout)
+                                         int mac80211_queue, int fifo,
+                                         int sta_id, int tid, int frame_limit,
+                                         u16 ssn, unsigned int wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -1377,7 +1398,7 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
                .aggregate = true,
        };
 
-       iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
+       iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 /* Thermal management and CT-kill */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 328187d..4e4a680 100644
@@ -316,7 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
                                  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-                                 lar_enabled, mac_addr0, mac_addr1);
+                                 lar_enabled, mac_addr0, mac_addr1,
+                                 mvm->trans->hw_id);
 }
 
 #define MAX_NVM_FILE_LEN       16384
@@ -563,6 +564,10 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
                                mvm->nvm_prod_blob.data = temp;
                                mvm->nvm_prod_blob.size  = ret;
                                break;
+                       case NVM_SECTION_TYPE_PHY_SKU:
+                               mvm->nvm_phy_sku_blob.data = temp;
+                               mvm->nvm_phy_sku_blob.size  = ret;
+                               break;
                        default:
                                if (section == mvm->cfg->nvm_hw_section_num) {
                                        mvm->nvm_hw_blob.data = temp;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 3f6428c..064c100 100644
@@ -89,6 +89,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
 static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
 
 struct iwl_mvm_mod_params iwlmvm_mod_params = {
        .power_scheme = IWL_POWER_SCHEME_BPS,
@@ -222,7 +223,6 @@ struct iwl_rx_handlers {
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
 
@@ -257,6 +257,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
                   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
        RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+       RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+                      iwl_mvm_temp_notif, true),
 
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
                   true),
@@ -423,7 +425,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
 
        op_mode = hw->priv;
-       op_mode->ops = &iwl_mvm_ops;
 
        mvm = IWL_OP_MODE_GET_MVM(op_mode);
        mvm->dev = trans->dev;
@@ -432,6 +433,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->fw = fw;
        mvm->hw = hw;
 
+       if (iwl_mvm_has_new_rx_api(mvm)) {
+               op_mode->ops = &iwl_mvm_ops_mq;
+       } else {
+               op_mode->ops = &iwl_mvm_ops;
+
+               if (WARN_ON(trans->num_rx_queues > 1))
+                       goto out_free;
+       }
+
        mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
        mvm->aux_queue = 15;
@@ -452,6 +462,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_LIST_HEAD(&mvm->aux_roc_te_list);
        INIT_LIST_HEAD(&mvm->async_handlers_list);
        spin_lock_init(&mvm->time_event_lock);
+       spin_lock_init(&mvm->queue_info_lock);
 
        INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -717,18 +728,11 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
        }
 }
 
-static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-                               struct napi_struct *napi,
-                               struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb,
+                             struct iwl_rx_packet *pkt)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-       u8 i;
-
-       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
-               iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
-               return;
-       }
+       int i;
 
        iwl_mvm_rx_check_trigger(mvm, pkt);
 
@@ -768,40 +772,84 @@ static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
        }
 }
 
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+                      struct napi_struct *napi,
+                      struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+               iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+       else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+               iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+       else
+               iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+                         struct napi_struct *napi,
+                         struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+               iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+       else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+               iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+       else
+               iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-       int mq = mvm->queue_to_mac80211[queue];
+       unsigned long mq;
+       int q;
 
-       if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
-               return;
+       spin_lock_bh(&mvm->queue_info_lock);
+       mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+       spin_unlock_bh(&mvm->queue_info_lock);
 
-       if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
-               IWL_DEBUG_TX_QUEUES(mvm,
-                                   "queue %d (mac80211 %d) already stopped\n",
-                                   queue, mq);
+       if (WARN_ON_ONCE(!mq))
                return;
-       }
 
-       ieee80211_stop_queue(mvm->hw, mq);
+       for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+               if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "queue %d (mac80211 %d) already stopped\n",
+                                           queue, q);
+                       continue;
+               }
+
+               ieee80211_stop_queue(mvm->hw, q);
+       }
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-       int mq = mvm->queue_to_mac80211[queue];
+       unsigned long mq;
+       int q;
 
-       if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
-               return;
+       spin_lock_bh(&mvm->queue_info_lock);
+       mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+       spin_unlock_bh(&mvm->queue_info_lock);
 
-       if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
-               IWL_DEBUG_TX_QUEUES(mvm,
-                                   "queue %d (mac80211 %d) still stopped\n",
-                                   queue, mq);
+       if (WARN_ON_ONCE(!mq))
                return;
-       }
 
-       ieee80211_wake_queue(mvm->hw, mq);
+       for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+               if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "queue %d (mac80211 %d) still stopped\n",
+                                           queue, q);
+                       continue;
+               }
+
+               ieee80211_wake_queue(mvm->hw, q);
+       }
 }
 
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
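
hw_queue_to_mac80211 is now a bitmap rather than a single index, since one hardware queue can back several mac80211 queues; stopping a HW queue therefore walks every set bit, and the per-queue stop counter keeps a mac80211 queue stopped until all of its HW queues have woken again. A userspace analogue of that walk (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            /* pretend this HW queue feeds mac80211 queues 2 and 5 */
            unsigned long mq = (1UL << 2) | (1UL << 5);
            int stop_count[16] = { 0 };

            for (int q = 0; q < 16; q++) {
                    if (!(mq & (1UL << q)))
                            continue;               /* same effect as for_each_set_bit() */
                    if (++stop_count[q] > 1) {
                            printf("mac80211 queue %d already stopped\n", q);
                            continue;
                    }
                    printf("stop mac80211 queue %d\n", q);
            }
            return 0;
    }
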
@@ -1347,17 +1395,38 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
        return _iwl_mvm_exit_d0i3(mvm);
 }
 
+#define IWL_MVM_COMMON_OPS                                     \
+       /* these could be differentiated */                     \
+       .queue_full = iwl_mvm_stop_sw_queue,                    \
+       .queue_not_full = iwl_mvm_wake_sw_queue,                \
+       .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,              \
+       .free_skb = iwl_mvm_free_skb,                           \
+       .nic_error = iwl_mvm_nic_error,                         \
+       .cmd_queue_full = iwl_mvm_cmd_queue_full,               \
+       .nic_config = iwl_mvm_nic_config,                       \
+       .enter_d0i3 = iwl_mvm_enter_d0i3,                       \
+       .exit_d0i3 = iwl_mvm_exit_d0i3,                         \
+       /* as we only register one, these MUST be common! */    \
+       .start = iwl_op_mode_mvm_start,                         \
+       .stop = iwl_op_mode_mvm_stop
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
-       .start = iwl_op_mode_mvm_start,
-       .stop = iwl_op_mode_mvm_stop,
-       .rx = iwl_mvm_rx_dispatch,
-       .queue_full = iwl_mvm_stop_sw_queue,
-       .queue_not_full = iwl_mvm_wake_sw_queue,
-       .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
-       .free_skb = iwl_mvm_free_skb,
-       .nic_error = iwl_mvm_nic_error,
-       .cmd_queue_full = iwl_mvm_cmd_queue_full,
-       .nic_config = iwl_mvm_nic_config,
-       .enter_d0i3 = iwl_mvm_enter_d0i3,
-       .exit_d0i3 = iwl_mvm_exit_d0i3,
+       IWL_MVM_COMMON_OPS,
+       .rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+                             struct napi_struct *napi,
+                             struct iwl_rx_cmd_buffer *rxb,
+                             unsigned int queue)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+       IWL_MVM_COMMON_OPS,
+       .rx = iwl_mvm_rx_mq,
+       .rx_rss = iwl_mvm_rx_mq_rss,
 };
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 4645877..723b537 100644
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -306,13 +308,50 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
        return radar_detect;
 }
 
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+                                          struct ieee80211_vif *vif,
+                                          struct iwl_mac_power_cmd *cmd,
+                                          bool host_awake)
+{
+       int dtimper = vif->bss_conf.dtim_period ?: 1;
+       int skip;
+
+       /* disable, in case we're supposed to override */
+       cmd->skip_dtim_periods = 0;
+       cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+       if (iwl_mvm_power_is_radar(vif))
+               return;
+
+       if (dtimper >= 10)
+               return;
+
+       /* TODO: check that multicast wake lock is off */
+
+       if (host_awake) {
+               if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+                       return;
+               skip = 2;
+       } else {
+               int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+
+               if (WARN_ON(!dtimper_tu))
+                       return;
+               /* configure skip over dtim up to 306TU - 314 msec */
+               skip = max_t(u8, 1, 306 / dtimper_tu);
+       }
+
+       /* the firmware really expects "look at every X DTIMs", so add 1 */
+       cmd->skip_dtim_periods = 1 + skip;
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
 {
        int dtimper, bi;
        int keep_alive;
-       bool radar_detect = false;
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);
 
@@ -350,16 +389,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
        }
 
-       /* Check if radar detection is required on current channel */
-       radar_detect = iwl_mvm_power_is_radar(vif);
-
-       /* Check skip over DTIM conditions */
-       if (!radar_detect && (dtimper < 10) &&
-           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
-            mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-               cmd->skip_dtim_periods = 3;
-       }
+       iwl_mvm_power_config_skip_dtim(mvm, vif, cmd,
+                                      mvm->cur_ucode != IWL_UCODE_WOWLAN);
 
        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
                cmd->rx_data_timeout =
@@ -440,14 +471,14 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 {
        struct iwl_device_power_cmd cmd = {
-               .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+               .flags = 0,
        };
 
        if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
                mvm->ps_disabled = true;
 
-       if (mvm->ps_disabled)
-               cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+       if (!mvm->ps_disabled)
+               cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
@@ -964,24 +995,11 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
                return 0;
 
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-       if (enable) {
-               /* configure skip over dtim up to 306TU - 314 msec */
-               int dtimper = vif->bss_conf.dtim_period ?: 1;
-               int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
-               bool radar_detect = iwl_mvm_power_is_radar(vif);
 
-               if (WARN_ON(!dtimper_tu))
-                       return 0;
-
-               /* Check skip over DTIM conditions */
-               /* TODO: check that multicast wake lock is off */
-               if (!radar_detect && (dtimper < 10)) {
-                       cmd.skip_dtim_periods = 306 / dtimper_tu;
-                       if (cmd.skip_dtim_periods)
-                               cmd.flags |= cpu_to_le16(
-                                       POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-               }
-       }
+       /* when enabling D0i3, override the skip-over-dtim configuration */
+       if (enable)
+               iwl_mvm_power_config_skip_dtim(mvm, vif, &cmd, false);
+
        iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 5ae9c8a..34d98b2 100644
@@ -524,14 +524,56 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
        return lq_types[type];
 }
 
+static char *rs_pretty_rate(const struct rs_rate *rate)
+{
+       static char buf[40];
+       static const char * const legacy_rates[] = {
+               [IWL_RATE_1M_INDEX] = "1M",
+               [IWL_RATE_2M_INDEX] = "2M",
+               [IWL_RATE_5M_INDEX] = "5.5M",
+               [IWL_RATE_11M_INDEX] = "11M",
+               [IWL_RATE_6M_INDEX] = "6M",
+               [IWL_RATE_9M_INDEX] = "9M",
+               [IWL_RATE_12M_INDEX] = "12M",
+               [IWL_RATE_18M_INDEX] = "18M",
+               [IWL_RATE_24M_INDEX] = "24M",
+               [IWL_RATE_36M_INDEX] = "36M",
+               [IWL_RATE_48M_INDEX] = "48M",
+               [IWL_RATE_54M_INDEX] = "54M",
+       };
+       static const char *const ht_vht_rates[] = {
+               [IWL_RATE_MCS_0_INDEX] = "MCS0",
+               [IWL_RATE_MCS_1_INDEX] = "MCS1",
+               [IWL_RATE_MCS_2_INDEX] = "MCS2",
+               [IWL_RATE_MCS_3_INDEX] = "MCS3",
+               [IWL_RATE_MCS_4_INDEX] = "MCS4",
+               [IWL_RATE_MCS_5_INDEX] = "MCS5",
+               [IWL_RATE_MCS_6_INDEX] = "MCS6",
+               [IWL_RATE_MCS_7_INDEX] = "MCS7",
+               [IWL_RATE_MCS_8_INDEX] = "MCS8",
+               [IWL_RATE_MCS_9_INDEX] = "MCS9",
+       };
+       const char *rate_str;
+
+       if (is_type_legacy(rate->type))
+               rate_str = legacy_rates[rate->index];
+       else if (is_type_ht(rate->type) || is_type_vht(rate->type))
+               rate_str = ht_vht_rates[rate->index];
+       else
+               rate_str = "BAD_RATE";
+
+       sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
+               rs_pretty_ant(rate->ant), rate_str);
+       return buf;
+}
+
 static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
                                const char *prefix)
 {
        IWL_DEBUG_RATE(mvm,
-                      "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
-                      prefix, rs_pretty_lq_type(rate->type),
-                      rate->index, rs_pretty_ant(rate->ant),
-                      rate->bw, rate->sgi, rate->ldpc, rate->stbc);
+                      "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
+                      prefix, rs_pretty_rate(rate), rate->bw,
+                      rate->sgi, rate->ldpc, rate->stbc);
 }
 
 static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@@ -562,8 +604,8 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
 }
 
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
-                                     struct iwl_lq_sta *lq_data, u8 tid,
-                                     struct ieee80211_sta *sta)
+                                    struct iwl_lq_sta *lq_data, u8 tid,
+                                    struct ieee80211_sta *sta)
 {
        int ret = -EAGAIN;
 
@@ -1485,7 +1527,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
        u32 target_tpt;
        int rate_idx;
 
-       if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) {
+       if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
                target_tpt = 100 * expected_current_tpt;
                IWL_DEBUG_RATE(mvm,
                               "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@@ -1493,7 +1535,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
        } else {
                target_tpt = lq_sta->last_tpt;
                IWL_DEBUG_RATE(mvm,
-                              "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
+                              "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
                               success_ratio, target_tpt);
        }
 
@@ -1622,6 +1664,51 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 }
 
+static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+                             struct ieee80211_sta *sta,
+                             struct iwl_lq_sta *lq_sta,
+                             struct iwl_scale_tbl_info *tbl,
+                             enum rs_action scale_action)
+{
+       if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+               return false;
+
+       if (!is_vht_siso(&tbl->rate))
+               return false;
+
+       if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
+           (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
+           (scale_action == RS_ACTION_DOWNSCALE)) {
+               tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
+               tbl->rate.index = IWL_RATE_MCS_4_INDEX;
+               IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
+               goto tweaked;
+       }
+
+       /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
+        * sustainable, i.e. we're past the test window. We can't go back
+        * if MCS5 is just tested as this will happen always after switching
+        * to 20Mhz MCS4 because the rate stats are cleared.
+        */
+       if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
+           (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
+            (scale_action == RS_ACTION_STAY)) ||
+            ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
+             (scale_action == RS_ACTION_UPSCALE)))) {
+               tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
+               tbl->rate.index = IWL_RATE_MCS_1_INDEX;
+               IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
+               goto tweaked;
+       }
+
+       return false;
+
+tweaked:
+       rs_set_expected_tpt_table(lq_sta, tbl);
+       rs_rate_scale_clear_tbl_windows(mvm, tbl);
+       return true;
+}
+
 static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
                                         struct iwl_lq_sta *lq_sta,
                                         struct ieee80211_sta *sta,
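
For orientation on why the 80/20 tweak helps at far range, compare the standard 802.11ac single-stream, long-GI PHY rates (figures quoted from the VHT MCS tables, not from this patch):

    80 MHz VHT MCS0  ~29.3 Mbps   vs   20 MHz VHT MCS4  ~39.0 Mbps
    80 MHz VHT MCS1  ~58.5 Mbps   vs   20 MHz VHT MCS5  ~52.0 Mbps

Dropping from 80 MHz MCS0 to 20 MHz MCS4 therefore raises the nominal rate while concentrating energy in a narrower channel that is easier to sustain at low SNR, and the algorithm only climbs back to 80 MHz MCS1 once 20 MHz MCS5 has proven stable past its test window.
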
@@ -2174,9 +2261,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
        if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
            (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
                IWL_DEBUG_RATE(mvm,
-                              "(%s: %d): Test Window: succ %d total %d\n",
-                              rs_pretty_lq_type(rate->type),
-                              index, window->success_counter, window->counter);
+                              "%s: Test Window: succ %d total %d\n",
+                              rs_pretty_rate(rate),
+                              window->success_counter, window->counter);
 
                /* Can't calculate this yet; not enough history */
                window->average_tpt = IWL_INVALID_VALUE;
@@ -2253,8 +2340,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                high_tpt = tbl->win[high].average_tpt;
 
        IWL_DEBUG_RATE(mvm,
-                      "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
-                      rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+                      "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+                      rs_pretty_rate(rate), current_tpt, sr,
                       low, high, low_tpt, high_tpt);
 
        scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
@@ -2305,6 +2392,8 @@ lq_update:
        /* Replace uCode's rate table for the destination station. */
        if (update_lq) {
                tbl->rate.index = index;
+               if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
+                       rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
                rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
        }
 
@@ -2542,7 +2631,6 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
                }
        }
 
-       rs_dump_rate(mvm, rate, "OPTIMAL RATE");
        return rate;
 }
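
For readers skimming the diff, a condensed sketch of the decision implemented by rs_tweak_rate_tbl() above. The enums and index constants are taken from the hunks; the free-standing helper below and its name are purely illustrative and not part of the patch.

/* Illustrative condensation of rs_tweak_rate_tbl() above; not driver code. */
static bool rs_far_range_transition_sketch(struct rs_rate *rate,
					    enum rs_action action)
{
	/* Far range: downscaling from 80MHz SISO MCS0 jumps to 20MHz MCS4
	 * instead of dropping further within the 80MHz table. */
	if (rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
	    rate->index == IWL_RATE_MCS_0_INDEX &&
	    action == RS_ACTION_DOWNSCALE) {
		rate->bw = RATE_MCS_CHAN_WIDTH_20;
		rate->index = IWL_RATE_MCS_4_INDEX;
		return true;
	}

	/* Way back: only once 20MHz MCS5 is sustained (STAY), or a higher
	 * 20MHz MCS is being upscaled, return to 80MHz MCS1. */
	if (rate->bw == RATE_MCS_CHAN_WIDTH_20 &&
	    ((rate->index == IWL_RATE_MCS_5_INDEX &&
	      action == RS_ACTION_STAY) ||
	     (rate->index > IWL_RATE_MCS_5_INDEX &&
	      action == RS_ACTION_UPSCALE))) {
		rate->bw = RATE_MCS_CHAN_WIDTH_80;
		rate->index = IWL_RATE_MCS_1_INDEX;
		return true;
	}

	return false;
}

After either transition the caller still has to refresh the expected-throughput table and clear the per-rate statistics windows, exactly as the hunk does with rs_set_expected_tpt_table() and rs_rate_scale_clear_tbl_windows().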
 
index 0a6d47c..5b58f53 100644 (file)
@@ -346,8 +346,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
        /* This is fine since we don't support multiple AP interfaces */
        sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
        if (sta) {
-               struct iwl_mvm_sta *mvmsta;
-               mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
 
                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
index fe2f538..a9a3eb6 100644 (file)
@@ -234,7 +234,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-                                     iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
+                                     mvmsta->hw_queue[ac],
+                                     iwl_mvm_ac_to_tx_fifo[ac], 0,
+                                     wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }
 
@@ -253,7 +255,7 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
        /* disable the TDLS STA-specific queues */
        sta_msk = mvmsta->tfd_queue_msk;
        for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-               iwl_mvm_disable_txq(mvm, i, 0);
+               iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -277,7 +279,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
        if (vif->type == NL80211_IFTYPE_AP) {
                mvmvif->ap_assoc_sta_count++;
-               iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, FW_CTXT_ACTION_MODIFY);
+               iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
        }
 
        spin_lock_init(&mvm_sta->lock);
@@ -292,7 +294,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
        /* HW restart, don't assume the memory has been zeroed */
        atomic_set(&mvm->pending_frames[sta_id], 0);
-       mvm_sta->tid_disable_agg = 0;
+       mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;
 
        /* allocate new queues for a TDLS station */
@@ -472,7 +474,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
                        unsigned long i, msk = mvm->tfd_drained[sta_id];
 
                        for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-                               iwl_mvm_disable_txq(mvm, i, 0);
+                               iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 
                        mvm->tfd_drained[sta_id] = 0;
                        IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -651,8 +653,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        lockdep_assert_held(&mvm->mutex);
 
        /* Map Aux queue to fifo - needs to happen before adding Aux station */
-       iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
-                             IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+       iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+                             IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 
        /* Allocate aux station and assign to it the aux queue */
        ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -923,6 +925,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data;
        int txq_id;
+       int ret;
 
        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                return -EINVAL;
@@ -935,17 +938,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        lockdep_assert_held(&mvm->mutex);
 
-       for (txq_id = mvm->first_agg_queue;
-            txq_id <= mvm->last_agg_queue; txq_id++)
-               if (mvm->queue_to_mac80211[txq_id] ==
-                   IWL_INVALID_MAC80211_QUEUE)
-                       break;
-
-       if (txq_id > mvm->last_agg_queue) {
-               IWL_ERR(mvm, "Failed to allocate agg queue\n");
-               return -EIO;
-       }
-
        spin_lock_bh(&mvmsta->lock);
 
        /* possible race condition - we entered D0i3 while starting agg */
@@ -955,8 +947,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
        }
 
-       /* the new tx queue is still connected to the same mac80211 queue */
-       mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+                                        mvm->last_agg_queue);
+       if (txq_id < 0) {
+               ret = txq_id;
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm, "Failed to allocate agg queue\n");
+               goto release_locks;
+       }
+       mvm->queue_info[txq_id].setup_reserved = true;
+       spin_unlock_bh(&mvm->queue_info_lock);
 
        tid_data = &mvmsta->tid_data[tid];
        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -975,9 +977,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
 
+       ret = 0;
+
+release_locks:
        spin_unlock_bh(&mvmsta->lock);
 
-       return 0;
+       return ret;
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1005,13 +1010,19 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-       iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-                              buf_size, ssn, wdg_timeout);
+       iwl_mvm_enable_agg_txq(mvm, queue,
+                              vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
+                              mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
 
        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
        if (ret)
                return -EIO;
 
+       /* No need to mark as reserved */
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[queue].setup_reserved = false;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        /*
         * Even though in theory the peer could have different
         * aggregation reorder buffer sizes for different sessions,
@@ -1056,6 +1067,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        mvmsta->agg_tids &= ~BIT(tid);
 
+       /* No need to mark as reserved anymore */
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[txq_id].setup_reserved = false;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        switch (tid_data->state) {
        case IWL_AGG_ON:
                tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -1073,14 +1089,15 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
                tid_data->ssn = 0xffff;
                tid_data->state = IWL_AGG_OFF;
-               mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
                spin_unlock_bh(&mvmsta->lock);
 
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-               iwl_mvm_disable_txq(mvm, txq_id, 0);
+               iwl_mvm_disable_txq(mvm, txq_id,
+                                   vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+                                   0);
                return 0;
        case IWL_AGG_STARTING:
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1091,7 +1108,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
                /* No barriers since we are under mutex */
                lockdep_assert_held(&mvm->mutex);
-               mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                tid_data->state = IWL_AGG_OFF;
@@ -1132,6 +1148,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        mvmsta->agg_tids &= ~BIT(tid);
        spin_unlock_bh(&mvmsta->lock);
 
+       /* No need to mark as reserved */
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[txq_id].setup_reserved = false;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        if (old_state >= IWL_AGG_ON) {
                iwl_mvm_drain_sta(mvm, mvmsta, true);
                if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
@@ -1142,12 +1163,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-               iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
+               iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+                                   vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+                                   0);
        }
 
-       mvm->queue_to_mac80211[tid_data->txq_id] =
-                               IWL_INVALID_MAC80211_QUEUE;
-
        return 0;
 }
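
Taken together, the sta.c hunks above change the aggregation-queue lifecycle: a queue is picked and reserved under queue_info_lock when the BA session starts, and the reservation is dropped once the queue is actually configured (agg_oper) or the session is torn down (agg_stop/agg_flush). A minimal sketch of that reserve/release pattern follows; the wrapper functions are illustrative only, the locking and fields are as in the hunks.

/* Illustrative reserve/release pattern for an aggregation queue;
 * mirrors the hunks above, not a new API. */
static int example_reserve_agg_queue(struct iwl_mvm *mvm)
{
	int txq_id;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
					 mvm->last_agg_queue);
	if (txq_id < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return txq_id;	/* e.g. -ENOSPC: no free queue */
	}
	/* Reserved: nobody else may pick this queue until it is enabled */
	mvm->queue_info[txq_id].setup_reserved = true;
	spin_unlock_bh(&mvm->queue_info_lock);

	return txq_id;
}

static void example_release_agg_queue(struct iwl_mvm *mvm, int txq_id)
{
	/* Reservation no longer needed: either the queue was enabled via
	 * iwl_mvm_enable_agg_txq() or the BA session was stopped/flushed. */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[txq_id].setup_reserved = false;
	spin_unlock_bh(&mvm->queue_info_lock);
}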
 
index 7ae0bd8..4007f1d 100644 (file)
@@ -185,6 +185,7 @@ int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
        }
 
        cmd->sta_id = mvmvif->bcast_sta.sta_id;
+       memcpy(cmd->bssid, vif->addr, ETH_ALEN);
        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
                                    0, sizeof(*cmd), cmd);
index 50ae8ad..9beebc3 100644 (file)
@@ -60,7 +60,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __tof
+#ifndef __tof_h__
 #define __tof_h__
 
 #include "fw-api-tof.h"
index fe7145c..58b762f 100644 (file)
@@ -176,17 +176,27 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
        struct iwl_dts_measurement_cmd cmd = {
                .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
        };
+       u32 cmdid;
 
-       return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0,
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+               cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+                                  PHY_OPS_GROUP, 0);
+       else
+               cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+       return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0,
                                    sizeof(cmd), &cmd);
 }
 
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_temp_notif;
-       static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+       static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+                                           DTS_MEASUREMENT_NOTIF_WIDE) };
        int ret, temp;
 
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+               temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
+
        lockdep_assert_held(&mvm->mutex);
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
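
The temperature hunks switch to the "wide" command and notification IDs when the firmware advertises IWL_UCODE_TLV_API_WIDE_CMD_HDR, i.e. the ID carries a group in addition to the opcode. A rough sketch of the selection pattern used above; the exact packing done by WIDE_ID()/iwl_cmd_id() lives in the transport headers and is only assumed here.

/* Illustrative only: choosing the DTS trigger command ID depending on
 * firmware support for wide command headers, following the hunk above. */
static u32 example_dts_trigger_id(struct iwl_mvm *mvm)
{
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
		/* group + opcode: PHY_OPS_GROUP / CMD_DTS_MEASUREMENT_TRIGGER_WIDE */
		return iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
				  PHY_OPS_GROUP, 0);

	/* Legacy firmware: plain opcode in the default group */
	return CMD_DTS_MEASUREMENT_TRIGGER;
}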
index 6df5aad..ff8b9bd 100644 (file)
@@ -560,15 +560,10 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can continue DELBA flow ssn = next_recl = %d\n",
                                    tid_data->next_reclaimed);
-               iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
+               iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+                                   vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+                                   CMD_ASYNC);
                tid_data->state = IWL_AGG_OFF;
-               /*
-                * we can't hold the mutex - but since we are after a sequence
-                * point (call to iwl_mvm_disable_txq(), so we don't even need
-                * a memory barrier.
-                */
-               mvm->queue_to_mac80211[tid_data->txq_id] =
-                                       IWL_INVALID_MAC80211_QUEUE;
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
 
index 06cba97..ad0f169 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -657,34 +658,143 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        if (mvm->support_umac_log)
                iwl_mvm_dump_umac_error_log(mvm);
 }
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-                       const struct iwl_trans_txq_scd_cfg *cfg,
+
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+{
+       int i;
+
+       lockdep_assert_held(&mvm->queue_info_lock);
+
+       for (i = minq; i <= maxq; i++)
+               if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+                   !mvm->queue_info[i].setup_reserved)
+                       return i;
+
+       return -ENOSPC;
+}
+
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                       u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout)
 {
-       struct iwl_scd_txq_cfg_cmd cmd = {
-               .scd_queue = queue,
-               .enable = 1,
-               .window = cfg->frame_limit,
-               .sta_id = cfg->sta_id,
-               .ssn = cpu_to_le16(ssn),
-               .tx_fifo = cfg->fifo,
-               .aggregate = cfg->aggregate,
-               .tid = cfg->tid,
-       };
+       bool enable_queue = true;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /* Make sure this TID isn't already enabled */
+       if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
+                       cfg->tid);
+               return;
+       }
 
-       iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
-       WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-            "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+       /* Update mappings and refcounts */
+       mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+       mvm->queue_info[queue].hw_queue_refcount++;
+       if (mvm->queue_info[queue].hw_queue_refcount > 1)
+               enable_queue = false;
+       mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+                           queue, mvm->queue_info[queue].hw_queue_refcount,
+                           mvm->queue_info[queue].hw_queue_to_mac80211);
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       /* Send the enabling command if we need to */
+       if (enable_queue) {
+               struct iwl_scd_txq_cfg_cmd cmd = {
+                       .scd_queue = queue,
+                       .enable = 1,
+                       .window = cfg->frame_limit,
+                       .sta_id = cfg->sta_id,
+                       .ssn = cpu_to_le16(ssn),
+                       .tx_fifo = cfg->fifo,
+                       .aggregate = cfg->aggregate,
+                       .tid = cfg->tid,
+               };
+
+               iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
+                                        wdg_timeout);
+               WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+                                         &cmd),
+                    "Failed to configure queue %d on FIFO %d\n", queue,
+                    cfg->fifo);
+       }
 }
 
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                        u8 tid, u8 flags)
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .enable = 0,
        };
+       bool remove_mac_queue = true;
        int ret;
 
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               return;
+       }
+
+       mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+       /*
+        * If there is another TID with the same AC - don't remove the MAC queue
+        * from the mapping
+        */
+       if (tid < IWL_MAX_TID_COUNT) {
+               unsigned long tid_bitmap =
+                       mvm->queue_info[queue].tid_bitmap;
+               int ac = tid_to_mac80211_ac[tid];
+               int i;
+
+               for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+                       if (tid_to_mac80211_ac[i] == ac)
+                               remove_mac_queue = false;
+               }
+       }
+
+       if (remove_mac_queue)
+               mvm->queue_info[queue].hw_queue_to_mac80211 &=
+                       ~BIT(mac80211_queue);
+       mvm->queue_info[queue].hw_queue_refcount--;
+
+       cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+                           queue,
+                           mvm->queue_info[queue].hw_queue_refcount,
+                           mvm->queue_info[queue].hw_queue_to_mac80211);
+
+       /* If the queue is still enabled - nothing left to do in this func */
+       if (cmd.enable) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               return;
+       }
+
+       /* Make sure queue info is correct even though we overwrite it */
+       WARN(mvm->queue_info[queue].hw_queue_refcount ||
+            mvm->queue_info[queue].tid_bitmap ||
+            mvm->queue_info[queue].hw_queue_to_mac80211,
+            "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+            queue, mvm->queue_info[queue].hw_queue_refcount,
+            mvm->queue_info[queue].hw_queue_to_mac80211,
+            mvm->queue_info[queue].tid_bitmap);
+
+       /* If we are here - the queue is freed and we can zero out these vals */
+       mvm->queue_info[queue].hw_queue_refcount = 0;
+       mvm->queue_info[queue].tid_bitmap = 0;
+       mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
                                   sizeof(cmd), &cmd);
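
The new enable/disable helpers above rely on per-queue bookkeeping kept in mvm->queue_info[] and protected by mvm->queue_info_lock. The structure itself is declared in mvm.h and is not part of this section; the sketch below is inferred from the fields accessed in the hunks (field names as used above, types and layout assumed).

/* Sketch of the per-TXQ bookkeeping used above (actual definition in
 * mvm.h; this is an inference from the accessed fields, not the source). */
struct iwl_mvm_queue_info_sketch {
	u16 hw_queue_to_mac80211;	/* bitmap of mac80211 queues mapped here */
	u16 tid_bitmap;			/* TIDs currently using this TXQ */
	u8 hw_queue_refcount;		/* how many users (TIDs/ACs) share the TXQ */
	bool setup_reserved;		/* picked by iwl_mvm_find_free_queue(), not enabled yet */
};

/*
 * Invariant maintained by iwl_mvm_enable_txq()/iwl_mvm_disable_txq():
 * the SCD_QUEUE_CFG command enabling the hardware queue is sent only when
 * the refcount goes 0 -> 1, and the disabling command only when it drops
 * back to 0; intermediate calls merely update the mappings and bitmaps.
 */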