d3a0378b547fbba3a59064cacdd5a51e14215dd0
[cascardo/linux.git] drivers/net/wireless/intel/iwlwifi/mvm/sta.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        return iwl_mvm_has_new_rx_api(mvm) ?
                sizeof(struct iwl_mvm_add_sta_cmd) :
                sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
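
/*
 * Illustrative sketch (not part of the driver): since the new fields sit at
 * the end of the structure, the v7 layout is a strict prefix of the current
 * one, and truncating the command buffer to the v7 size is all that older
 * firmware needs.  A hypothetical caller would do:
 *
 *	struct iwl_mvm_add_sta_cmd cmd = { .sta_id = sta_id };
 *
 *	iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
 *			     iwl_mvm_add_sta_cmd_size(mvm), &cmd);
 *
 * The new trailing fields are simply not transferred on the v7 path.
 */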

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_STATION_COUNT;
}
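
/*
 * Worked example of the reservation logic above (illustrative only): on a
 * non-station interface reserved_ids == BIT(0), so the scan skips sta_id 0
 * and the first slot that can be returned is sta_id 1.  On a station
 * interface nothing is reserved and sta_id 0 itself may be handed out.
 */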

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (flags & STA_MODIFY_QUEUES)
                        add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
        struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
                                          sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}
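
/*
 * Worked example of the timer arithmetic above (illustrative only): with a
 * negotiated BA timeout of 100 TUs, the session expires only if no frame was
 * received within the last 200 TUs (2 * timeout).  If a frame did arrive in
 * that window, the timer is simply re-armed to last_rx + 200 TUs instead of
 * the session being torn down.
 */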

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
                                 struct ieee80211_sta *sta)
{
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
        u32 ac;

        lockdep_assert_held(&mvm->mutex);

        used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

        /* Find available queues, and allocate them to the ACs */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);

                if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate STA queue\n");
                        return -EBUSY;
                }

                __set_bit(queue, &used_hw_queues);
                mvmsta->hw_queue[ac] = queue;
        }

        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
                                      mvmsta->hw_queue[ac],
                                      iwl_mvm_ac_to_tx_fifo[ac], 0,
                                      wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }

        return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned long sta_msk;
        int i;

        lockdep_assert_held(&mvm->mutex);

        /* disable the TDLS STA-specific queues */
        sta_msk = mvmsta->tfd_queue_msk;
        for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
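
/*
 * Illustrative example of the loop above (not part of the driver): with
 * sta_msk == 0x0c, for_each_set_bit() visits bits 2 and 3, so TX queues 2
 * and 3 are the ones disabled for this TDLS station.
 */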

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        s8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}
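
/*
 * Illustrative example (not part of the driver): if the queue serves TIDs 0
 * and 5 and both are in IWL_AGG_ON, the function above returns
 * BIT(0) | BIT(5) == 0x21; TIDs on the queue that are not aggregating do
 * not set a bit.
 */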

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        spin_lock_bh(&mvm->queue_info_lock);
        /* Unmap MAC queues and TIDs from this queue */
        mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
        mvm->queue_info[queue].hw_queue_refcount = 0;
        mvm->queue_info[queue].tid_bitmap = 0;
        spin_unlock_bh(&mvm->queue_info_lock);

        return disable_agg_tids;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try and take queues being reconfigured */
                if (mvm->queue_info[i].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}
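
/*
 * Worked example of the priority order above (illustrative only): a STA
 * owns DATA queues for AC_VI and AC_BK and a new AC_VO stream needs a
 * queue.  There is no AC_BE queue (priority 1) and no AC_VO queue
 * (priority 2), but the new AC is VO and a VI queue exists, so priority 3a
 * picks the VI queue for sharing.
 */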

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * the redirection when needed; otherwise - if no redirection is required -
 * it does nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        /*
         * If the AC is lower than the current one - the FIFO needs to be
         * redirected to the lowest one of the streams in the queue. Check if
         * this is needed here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is
         * with value 3 and VO with value 0, so to check if ac X is lower than
         * ac Y we need to check if the numerical value of X is LARGER than
         * of Y.
         */
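        /*
         * Illustrative values (assumed from the mac80211 enum, not defined
         * in this file): AC_VO == 0, AC_VI == 1, AC_BE == 2, AC_BK == 3.
         * So "redirect to a lower AC" means moving to a numerically LARGER
         * value, e.g. from VI (1) down to BK (3), and the check below
         * compares the raw numeric values with '<='.
         */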
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->queue_info[queue].hw_queue_to_mac80211;
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
                             ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no way to indicate a shared
         * queue there
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_ac_to_tx_fifo[ac],
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the
         * queue as aggregatable.
         * Mark all DATA queues as eligible for aggregation at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                struct iwl_scd_txq_cfg_cmd cmd = {
                        .scd_queue = queue,
                        .action = SCD_CFG_DISABLE_QUEUE,
                };
                u8 txq_curr_ac;

                disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

                spin_lock_bh(&mvm->queue_info_lock);
                txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
                cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
                cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
                cmd.tid = mvm->queue_info[queue].txq_tid;
                spin_unlock_bh(&mvm->queue_info_lock);

                /* Disable the queue */
                if (disable_agg_tids)
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                iwl_trans_txq_disable(mvm->trans, queue, false);
                ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
                                           &cmd);
                if (ret) {
                        IWL_ERR(mvm,
                                "Failed to free inactive queue %d (ret=%d)\n",
                                queue, ret);

                        /* Re-mark the inactive queue as inactive */
                        spin_lock_bh(&mvm->queue_info_lock);
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                        spin_unlock_bh(&mvm->queue_info_lock);

                        return ret;
                }

                /* If TXQ is allocated to another STA, update removal in FW */
                if (cmd.sta_id != mvmsta->sta_id)
                        iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
                           wdg_timeout);

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no way to indicate a shared
         * queue there
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        s8 sta_id;
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
        else
                IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                                    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        s8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}
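
/*
 * Mapping example (assumed from the standard 802.11 TID-to-AC mapping used
 * by mac80211, illustrative only): TIDs 0 and 3 map to AC_BE, 1 and 2 to
 * AC_BK, 4 and 5 to AC_VI, 6 and 7 to AC_VO.  The special value
 * tid == IWL_MAX_TID_COUNT denotes non-QoS/MGMT traffic and is sent on the
 * AC_VO queue, as the helper above encodes.
 */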

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head deferred_tx;
        u8 mac_queue;
        bool no_queue = false; /* Marks if there is a problem with the queue */
        u8 ac;

        lockdep_assert_held(&mvm->mutex);

        skb = skb_peek(&tid_data->deferred_tx_frames);
        if (!skb)
                return;
        hdr = (void *)skb->data;

        ac = iwl_mvm_tid_to_ac_queue(tid);
        mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

        if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
            iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
                IWL_ERR(mvm,
                        "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
                        mvmsta->sta_id, tid);

                /*
                 * Mark queue as problematic so later the deferred traffic is
                 * freed, as we can do nothing with it
                 */
                no_queue = true;
        }

        __skb_queue_head_init(&deferred_tx);

        /* Disable bottom-halves when entering TX path */
        local_bh_disable();
        spin_lock(&mvmsta->lock);
        skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
        spin_unlock(&mvmsta->lock);

        while ((skb = __skb_dequeue(&deferred_tx)))
                if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
                        ieee80211_free_txskb(mvm->hw, skb);
        local_bh_enable();

        /* Wake queue */
        iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
        int queue, sta_id, tid;

        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);

        mutex_lock(&mvm->mutex);

        /* Reconfigure queues requiring reconfiguration */
        for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
                bool reconfig;
                bool change_owner;

                spin_lock_bh(&mvm->queue_info_lock);
                reconfig = (mvm->queue_info[queue].status ==
                            IWL_MVM_QUEUE_RECONFIGURING);

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 */
                change_owner = !(mvm->queue_info[queue].tid_bitmap &
                                 BIT(mvm->queue_info[queue].txq_tid)) &&
                               (mvm->queue_info[queue].status ==
                                IWL_MVM_QUEUE_SHARED);
                spin_unlock_bh(&mvm->queue_info_lock);

                if (reconfig)
                        iwl_mvm_unshare_queue(mvm, queue);
                else if (change_owner)
                        iwl_mvm_change_queue_owner(mvm, queue);
        }

        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
                clear_bit(sta_id, mvm->sta_deferred_frames);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

                for_each_set_bit(tid, &deferred_tid_traffic,
                                 IWL_MAX_TID_COUNT + 1)
                        iwl_mvm_tx_deferred_stream(mvm, sta, tid);
        }

        mutex_unlock(&mvm->mutex);
}
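
/*
 * Worked example of the owner-change condition above (illustrative only): a
 * TXQ was owned by TID 0 (txq_tid == 0) and later shared with TIDs 1 and 2.
 * If TID 0 goes inactive and is removed, tid_bitmap becomes BIT(1) | BIT(2),
 * which no longer contains BIT(txq_tid), and the queue is still
 * IWL_MVM_QUEUE_SHARED - so ownership is handed to one of the remaining TIDs
 * via iwl_mvm_change_queue_owner().
 */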

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;

        /*
         * Check for inactive queues, so we don't reach a situation where we
         * can't add a STA due to a shortage in queues that doesn't really exist
         */
        iwl_mvm_inactivity_check(mvm);

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta->reserved_queue = queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct iwl_mvm_sta *mvm_sta)
{
        unsigned int wdg_timeout =
                        iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (or allocated) */
        mvm->queue_info[mvm_sta->reserved_queue].status =
                IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;
                u8 mac_queue;

                if (txq_id == IEEE80211_INVAL_HW_QUEUE)
                        continue;

                skb_queue_head_init(&tid_data->deferred_tx_frames);

                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];

                cfg.tid = i;
                cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
                cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-mapping sta %d tid %d to queue %d\n",
                                    mvm_sta->sta_id, i, txq_id);

                iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
                                   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
                                   &cfg, wdg_timeout);

                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
        }

        atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_rxq_dup_data *dup_data;
        int i, ret, sta_id;

        lockdep_assert_held(&mvm->mutex);

        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                sta_id = iwl_mvm_find_free_sta_id(mvm,
                                                  ieee80211_vif_type_p2p(vif));
        else
                sta_id = mvm_sta->sta_id;

        if (sta_id == IWL_MVM_STATION_COUNT)
                return -ENOSPC;

        spin_lock_init(&mvm_sta->lock);

        /* In DQA mode, if this is a HW restart, re-alloc existing queues */
        if (iwl_mvm_is_dqa_supported(mvm) &&
            test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
                goto update_fw;
        }

        mvm_sta->sta_id = sta_id;
        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                      mvmvif->color);
        mvm_sta->vif = vif;
        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
        mvm_sta->tx_protection = 0;
        mvm_sta->tt_tx_protection = false;

        /* HW restart, don't assume the memory has been zeroed */
        atomic_set(&mvm->pending_frames[sta_id], 0);
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;

        /*
         * Allocate new queues for a TDLS station, unless we're in DQA mode,
         * in which case they'll be allocated dynamically
         */
        if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
                ret = iwl_mvm_tdls_sta_init(mvm, sta);
                if (ret)
                        return ret;
        } else if (!iwl_mvm_is_dqa_supported(mvm)) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++)
                        if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                                mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
        }

        /* for HW restart - reset everything but the sequence number */
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;

                if (!iwl_mvm_is_dqa_supported(mvm))
                        continue;

                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                 */
                mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
                skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
        }
        mvm_sta->deferred_traffic_tid_map = 0;
        mvm_sta->agg_tids = 0;

        if (iwl_mvm_has_new_rx_api(mvm) &&
            !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                dup_data = kcalloc(mvm->trans->num_rx_queues,
                                   sizeof(*dup_data),
                                   GFP_KERNEL);
                if (!dup_data)
                        return -ENOMEM;
                mvm_sta->dup_data = dup_data;
        }

        if (iwl_mvm_is_dqa_supported(mvm)) {
                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                                 ieee80211_vif_type_p2p(vif));
                if (ret)
                        goto err;
        }

update_fw:
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
        if (ret)
                goto err;

        if (vif->type == NL80211_IFTYPE_STATION) {
                if (!sta->tdls) {
                        WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
                        mvmvif->ap_sta_id = sta_id;
                } else {
                        WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
                }
        }

        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

        return 0;

err:
        if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
                iwl_mvm_tdls_sta_deinit(mvm, sta);
        return ret;
}

int iwl_mvm_update_sta(struct iwl_mvm *mvm,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta)
{
        return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;

        lockdep_assert_held(&mvm->mutex);

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
                               mvmsta->sta_id);
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
                        mvmsta->sta_id);
                break;
        }

        return ret;
}
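
/*
 * Typical caller flow (an illustrative sketch, not a sequence mandated by
 * this file):
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);
 *	...wait for the station's TX queues to empty...
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);
 *
 * Draining would be enabled before flushing a station's frames and disabled
 * again once the firmware has emptied the queues.
 */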

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
                .sta_id = sta_id,
        };
        int ret;

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        /* Note: internal stations are marked as error values */
        if (!sta) {
                IWL_ERR(mvm, "Invalid station id\n");
                return -EINVAL;
        }

        ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
                                   sizeof(rm_sta_cmd), &rm_sta_cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
                return ret;
        }

        return 0;
}
1375
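/*
 * For reference - this summarizes behaviour already visible in this file,
 * it is not new driver logic. Entries in mvm->fw_id_to_mac_id are encoded
 * as follows:
 *   NULL             - the station ID is unused
 *   ERR_PTR(-EINVAL) - an internal station (aux/bcast/sniffer)
 *   ERR_PTR(-EBUSY)  - a removed station whose frames are still draining
 *   ERR_PTR(-ENOENT) - removed by mac80211, last frames being reclaimed
 *   valid pointer    - a live mac80211 station
 * The drained worker below relies on exactly this encoding.
 */
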
1376 void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1377 {
1378         struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1379         u8 sta_id;
1380
1381         /*
1382          * The mutex is needed because of the SYNC cmd, but not only that: if
1383          * this work ran concurrently with iwl_mvm_rm_sta, it could run before
1384          * iwl_mvm_rm_sta marks the station as busy and then exit. Afterwards,
1385          * iwl_mvm_rm_sta would mark the station as busy, and nobody would
1386          * clean that up later.
1387          */
1388         mutex_lock(&mvm->mutex);
1389
1390         for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1391                 int ret;
1392                 struct ieee80211_sta *sta =
1393                         rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1394                                                   lockdep_is_held(&mvm->mutex));
1395
1396                 /*
1397                  * This station is in use or RCU-removed; the latter happens in
1398                  * managed mode, where mac80211 removes the station before we
1399                  * can remove it from firmware (we can only do that after the
1400                  * MAC is marked unassociated), and possibly while the deauth
1401                  * frame to disconnect from the AP is still queued. Then, the
1402                  * station pointer is -ENOENT when the last skb is reclaimed.
1403                  */
1404                 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
1405                         continue;
1406
1407                 if (PTR_ERR(sta) == -EINVAL) {
1408                         IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1409                                 sta_id);
1410                         continue;
1411                 }
1412
1413                 if (!sta) {
1414                         IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1415                                 sta_id);
1416                         continue;
1417                 }
1418
1419                 WARN_ON(PTR_ERR(sta) != -EBUSY);
1420                 /* This station was removed and we waited until it got drained;
1421                  * we can now proceed and remove it.
1422                  */
1423                 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1424                 if (ret) {
1425                         IWL_ERR(mvm,
1426                                 "Couldn't remove sta %d after it was drained\n",
1427                                 sta_id);
1428                         continue;
1429                 }
1430                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1431                 clear_bit(sta_id, mvm->sta_drained);
1432
1433                 if (mvm->tfd_drained[sta_id]) {
1434                         unsigned long i, msk = mvm->tfd_drained[sta_id];
1435
1436                         for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
1437                                 iwl_mvm_disable_txq(mvm, i, i,
1438                                                     IWL_MAX_TID_COUNT, 0);
1439
1440                         mvm->tfd_drained[sta_id] = 0;
1441                         IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1442                                        sta_id, msk);
1443                 }
1444         }
1445
1446         mutex_unlock(&mvm->mutex);
1447 }
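
/*
 * Sketch of the producer side, shown as an assumption for context (the
 * real code lives in the TX reclaim path, not in this file): once the
 * last pending frame of a removed station has been reclaimed, that path
 * is expected to do roughly the following to kick the worker above.
 */
static void example_mark_sta_drained(struct iwl_mvm *mvm, u8 sta_id)
{
        set_bit(sta_id, mvm->sta_drained);
        schedule_work(&mvm->sta_drained_wk);
}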
1448
1449 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1450                                        struct ieee80211_vif *vif,
1451                                        struct iwl_mvm_sta *mvm_sta)
1452 {
1453         int ac;
1454         int i;
1455
1456         lockdep_assert_held(&mvm->mutex);
1457
1458         for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1459                 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1460                         continue;
1461
1462                 ac = iwl_mvm_tid_to_ac_queue(i);
1463                 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1464                                     vif->hw_queue[ac], i, 0);
1465                 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1466         }
1467 }
1468
1469 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1470                    struct ieee80211_vif *vif,
1471                    struct ieee80211_sta *sta)
1472 {
1473         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1474         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1475         int ret;
1476
1477         lockdep_assert_held(&mvm->mutex);
1478
1479         if (iwl_mvm_has_new_rx_api(mvm))
1480                 kfree(mvm_sta->dup_data);
1481
1482         if ((vif->type == NL80211_IFTYPE_STATION &&
1483              mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1484             iwl_mvm_is_dqa_supported(mvm)) {
1485                 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1486                 if (ret)
1487                         return ret;
1488                 /* flush its queues here since we are freeing mvm_sta */
1489                 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
1490                 if (ret)
1491                         return ret;
1492                 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1493                                                     mvm_sta->tfd_queue_msk);
1494                 if (ret)
1495                         return ret;
1496                 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1497
1498                 /* If DQA is supported - the queues can be disabled now */
1499                 if (iwl_mvm_is_dqa_supported(mvm)) {
1500                         u8 reserved_txq = mvm_sta->reserved_queue;
1501                         enum iwl_mvm_queue_status *status;
1502
1503                         iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1504
1505                         /*
1506                          * If no traffic has gone through the reserved TXQ - it
1507                          * is still marked as IWL_MVM_QUEUE_RESERVED, and
1508                          * should be manually marked as free again
1509                          */
1510                         spin_lock_bh(&mvm->queue_info_lock);
1511                         status = &mvm->queue_info[reserved_txq].status;
1512                         if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1513                                  (*status != IWL_MVM_QUEUE_FREE),
1514                                  "sta_id %d reserved txq %d status %d",
1515                                  mvm_sta->sta_id, reserved_txq, *status)) {
1516                                 spin_unlock_bh(&mvm->queue_info_lock);
1517                                 return -EINVAL;
1518                         }
1519
1520                         *status = IWL_MVM_QUEUE_FREE;
1521                         spin_unlock_bh(&mvm->queue_info_lock);
1522                 }
1523
1524                 if (vif->type == NL80211_IFTYPE_STATION &&
1525                     mvmvif->ap_sta_id == mvm_sta->sta_id) {
1526                         /* if associated - we can't remove the AP STA now */
1527                         if (vif->bss_conf.assoc)
1528                                 return ret;
1529
1530                         /* unassoc - go ahead - remove the AP STA now */
1531                         mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1532
1533                         /* clear d0i3_ap_sta_id if no longer relevant */
1534                         if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1535                                 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1536                 }
1537         }
1538
1539         /*
1540          * This shouldn't happen - the TDLS channel switch should be canceled
1541          * before the STA is removed.
1542          */
1543         if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1544                 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1545                 cancel_delayed_work(&mvm->tdls_cs.dwork);
1546         }
1547
1548         /*
1549          * Make sure that the tx response code sees the station as -EBUSY and
1550          * calls the drain worker.
1551          */
1552         spin_lock_bh(&mvm_sta->lock);
1553         /*
1554          * There are frames pending on the AC queues for this station.
1555          * We need to wait until all the frames are drained...
1556          */
1557         if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
1558                 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1559                                    ERR_PTR(-EBUSY));
1560                 spin_unlock_bh(&mvm_sta->lock);
1561
1562                 /* disable TDLS sta queues on drain complete */
1563                 if (sta->tdls) {
1564                         mvm->tfd_drained[mvm_sta->sta_id] =
1565                                                         mvm_sta->tfd_queue_msk;
1566                         IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1567                                        mvm_sta->sta_id);
1568                 }
1569
1570                 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1571         } else {
1572                 spin_unlock_bh(&mvm_sta->lock);
1573
1574                 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
1575                         iwl_mvm_tdls_sta_deinit(mvm, sta);
1576
1577                 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1578                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1579         }
1580
1581         return ret;
1582 }
1583
1584 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1585                       struct ieee80211_vif *vif,
1586                       u8 sta_id)
1587 {
1588         int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1589
1590         lockdep_assert_held(&mvm->mutex);
1591
1592         RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1593         return ret;
1594 }
1595
1596 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1597                              struct iwl_mvm_int_sta *sta,
1598                              u32 qmask, enum nl80211_iftype iftype)
1599 {
1600         if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1601                 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1602                 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
1603                         return -ENOSPC;
1604         }
1605
1606         sta->tfd_queue_msk = qmask;
1607
1608         /* put a non-NULL value so iterating over the stations won't stop */
1609         rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1610         return 0;
1611 }
1612
1613 static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1614                                     struct iwl_mvm_int_sta *sta)
1615 {
1616         RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1617         memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1618         sta->sta_id = IWL_MVM_STATION_COUNT;
1619 }
1620
1621 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1622                                       struct iwl_mvm_int_sta *sta,
1623                                       const u8 *addr,
1624                                       u16 mac_id, u16 color)
1625 {
1626         struct iwl_mvm_add_sta_cmd cmd;
1627         int ret;
1628         u32 status;
1629
1630         lockdep_assert_held(&mvm->mutex);
1631
1632         memset(&cmd, 0, sizeof(cmd));
1633         cmd.sta_id = sta->sta_id;
1634         cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1635                                                              color));
1636
1637         cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1638         cmd.tid_disable_tx = cpu_to_le16(0xffff);
1639
1640         if (addr)
1641                 memcpy(cmd.addr, addr, ETH_ALEN);
1642
1643         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1644                                           iwl_mvm_add_sta_cmd_size(mvm),
1645                                           &cmd, &status);
1646         if (ret)
1647                 return ret;
1648
1649         switch (status & IWL_ADD_STA_STATUS_MASK) {
1650         case ADD_STA_SUCCESS:
1651                 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1652                 return 0;
1653         default:
1654                 ret = -EIO;
1655                 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1656                         status);
1657                 break;
1658         }
1659         return ret;
1660 }
1661
1662 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1663 {
1664         unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1665                                         mvm->cfg->base_params->wd_timeout :
1666                                         IWL_WATCHDOG_DISABLED;
1667         int ret;
1668
1669         lockdep_assert_held(&mvm->mutex);
1670
1671         /* Map Aux queue to fifo - needs to happen before adding Aux station */
1672         if (!iwl_mvm_is_dqa_supported(mvm))
1673                 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1674                                       IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
1675
1676         /* Allocate aux station and assign to it the aux queue */
1677         ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1678                                        NL80211_IFTYPE_UNSPECIFIED);
1679         if (ret)
1680                 return ret;
1681
1682         if (iwl_mvm_is_dqa_supported(mvm)) {
1683                 struct iwl_trans_txq_scd_cfg cfg = {
1684                         .fifo = IWL_MVM_TX_FIFO_MCAST,
1685                         .sta_id = mvm->aux_sta.sta_id,
1686                         .tid = IWL_MAX_TID_COUNT,
1687                         .aggregate = false,
1688                         .frame_limit = IWL_FRAME_LIMIT,
1689                 };
1690
1691                 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1692                                    wdg_timeout);
1693         }
1694
1695         ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1696                                          MAC_INDEX_AUX, 0);
1697
1698         if (ret)
1699                 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1700         return ret;
1701 }
1702
1703 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1704 {
1705         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1706
1707         lockdep_assert_held(&mvm->mutex);
1708         return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1709                                          mvmvif->id, 0);
1710 }
1711
1712 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1713 {
1714         int ret;
1715
1716         lockdep_assert_held(&mvm->mutex);
1717
1718         ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1719         if (ret)
1720                 IWL_WARN(mvm, "Failed sending remove station\n");
1721
1722         return ret;
1723 }
1724
1725 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1726 {
1727         iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1728 }
1729
1730 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1731 {
1732         lockdep_assert_held(&mvm->mutex);
1733
1734         iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1735 }
1736
1737 /*
1738  * Send the add station command for the vif's broadcast station.
1739  * Assumes that the station was already allocated.
1740  *
1741  * @mvm: the mvm component
1742  * @vif: the interface to which the broadcast station is added
1744  */
1745 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1746 {
1747         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1748         struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1749         static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1750         const u8 *baddr = _baddr;
1751
1752         lockdep_assert_held(&mvm->mutex);
1753
1754         if (iwl_mvm_is_dqa_supported(mvm)) {
1755                 struct iwl_trans_txq_scd_cfg cfg = {
1756                         .fifo = IWL_MVM_TX_FIFO_VO,
1757                         .sta_id = mvmvif->bcast_sta.sta_id,
1758                         .tid = IWL_MAX_TID_COUNT,
1759                         .aggregate = false,
1760                         .frame_limit = IWL_FRAME_LIMIT,
1761                 };
1762                 unsigned int wdg_timeout =
1763                         iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1764                 int queue;
1765
1766                 if ((vif->type == NL80211_IFTYPE_AP) &&
1767                     (mvmvif->bcast_sta.tfd_queue_msk &
1768                      BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1769                         queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1770                 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1771                          (mvmvif->bcast_sta.tfd_queue_msk &
1772                           BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1773                         queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
1774                 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1775                         return -EINVAL;
1776
1777                 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1778                                    wdg_timeout);
1779         }
1780
1781         if (vif->type == NL80211_IFTYPE_ADHOC)
1782                 baddr = vif->bss_conf.bssid;
1783
1784         if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1785                 return -ENOSPC;
1786
1787         return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1788                                           mvmvif->id, mvmvif->color);
1789 }
1790
1791 /* Send the FW a request to remove the station from its internal data
1792  * structures, but DO NOT remove the entry from the local data structures. */
1793 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1794 {
1795         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1796         int ret;
1797
1798         lockdep_assert_held(&mvm->mutex);
1799
1800         ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
1801         if (ret)
1802                 IWL_WARN(mvm, "Failed sending remove station\n");
1803         return ret;
1804 }
1805
1806 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1807 {
1808         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1809         u32 qmask = 0;
1810
1811         lockdep_assert_held(&mvm->mutex);
1812
1813         if (!iwl_mvm_is_dqa_supported(mvm))
1814                 qmask = iwl_mvm_mac_get_queues_mask(vif);
1815
1816         if (vif->type == NL80211_IFTYPE_AP) {
1817                 /*
1818                  * The firmware defines the TFD queue mask to only be relevant
1819                  * for *unicast* queues, so the multicast (CAB) queue shouldn't
1820                  * be included.
1821                  */
1822                 qmask &= ~BIT(vif->cab_queue);
1823
1824                 if (iwl_mvm_is_dqa_supported(mvm))
1825                         qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
1826         } else if (iwl_mvm_is_dqa_supported(mvm) &&
1827                    vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1828                 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
1829         }
1830
1831         return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1832                                         ieee80211_vif_type_p2p(vif));
1833 }
1834
1835 /* Allocate a new station entry for the broadcast station to the given vif,
1836  * and send it to the FW.
1837  * Note that each P2P mac should have its own broadcast station.
1838  *
1839  * @mvm: the mvm component
1840  * @vif: the interface to which the broadcast station is added
1841  */
1842 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1843 {
1844         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1845         struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1846         int ret;
1847
1848         lockdep_assert_held(&mvm->mutex);
1849
1850         ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1851         if (ret)
1852                 return ret;
1853
1854         ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
1855
1856         if (ret)
1857                 iwl_mvm_dealloc_int_sta(mvm, bsta);
1858
1859         return ret;
1860 }
1861
1862 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1863 {
1864         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1865
1866         iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1867 }
1868
1869 /*
1870  * Send the FW a request to remove the station from its internal data
1871  * structures, and in addition remove it from the local data structure.
1872  */
1873 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1874 {
1875         int ret;
1876
1877         lockdep_assert_held(&mvm->mutex);
1878
1879         ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
1880
1881         iwl_mvm_dealloc_bcast_sta(mvm, vif);
1882
1883         return ret;
1884 }
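
/*
 * Illustrative sketch only, tying the broadcast-station helpers above
 * together: a hypothetical caller (e.g. around AP start/stop in
 * mvm/mac80211.c) would pair them like this.
 */
static int example_bcast_sta_lifecycle(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif)
{
        /* allocate the entry and send it to the FW in one go */
        int ret = iwl_mvm_add_bcast_sta(mvm, vif);

        if (ret)
                return ret;

        /* ... interface is running ... */

        /* remove from the FW and free the local entry */
        return iwl_mvm_rm_bcast_sta(mvm, vif);
}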
1885
1886 #define IWL_MAX_RX_BA_SESSIONS 16
1887
1888 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
1889 {
1890         struct iwl_mvm_delba_notif notif = {
1891                 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1892                 .metadata.sync = 1,
1893                 .delba.baid = baid,
1894         };
1895         iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1896 }
1897
1898 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
1899                                  struct iwl_mvm_baid_data *data)
1900 {
1901         int i;
1902
1903         iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
1904
1905         for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1906                 int j;
1907                 struct iwl_mvm_reorder_buffer *reorder_buf =
1908                         &data->reorder_buf[i];
1909
1910                 spin_lock_bh(&reorder_buf->lock);
1911                 if (likely(!reorder_buf->num_stored)) {
1912                         spin_unlock_bh(&reorder_buf->lock);
1913                         continue;
1914                 }
1915
1916                 /*
1917                  * This shouldn't happen in regular DELBA since the internal
1918                  * delBA notification should trigger a release of all frames in
1919                  * the reorder buffer.
1920                  */
1921                 WARN_ON(1);
1922
1923                 for (j = 0; j < reorder_buf->buf_size; j++)
1924                         __skb_queue_purge(&reorder_buf->entries[j]);
1925                 /*
1926                  * Prevent timer re-arm. This prevents a very far-fetched case
1927                  * where we timed out on the notification. There may be prior
1928                  * RX frames pending in the RX queue before the notification
1929                  * that might get processed between now and the actual deletion
1930                  * and we would re-arm the timer although we are deleting the
1931                  * reorder buffer.
1932                  */
1933                 reorder_buf->removed = true;
1934                 spin_unlock_bh(&reorder_buf->lock);
1935                 del_timer_sync(&reorder_buf->reorder_timer);
1936         }
1937 }
1938
1939 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
1940                                         u32 sta_id,
1941                                         struct iwl_mvm_baid_data *data,
1942                                         u16 ssn, u8 buf_size)
1943 {
1944         int i;
1945
1946         for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1947                 struct iwl_mvm_reorder_buffer *reorder_buf =
1948                         &data->reorder_buf[i];
1949                 int j;
1950
1951                 reorder_buf->num_stored = 0;
1952                 reorder_buf->head_sn = ssn;
1953                 reorder_buf->buf_size = buf_size;
1954                 /* rx reorder timer */
1955                 setup_timer(&reorder_buf->reorder_timer,
1956                             iwl_mvm_reorder_timer_expired,
1957                             (unsigned long)reorder_buf);
1959                 spin_lock_init(&reorder_buf->lock);
1960                 reorder_buf->mvm = mvm;
1961                 reorder_buf->queue = i;
1962                 reorder_buf->sta_id = sta_id;
1963                 for (j = 0; j < reorder_buf->buf_size; j++)
1964                         __skb_queue_head_init(&reorder_buf->entries[j]);
1965         }
1966 }
1967
1968 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1969                        int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
1970 {
1971         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1972         struct iwl_mvm_add_sta_cmd cmd = {};
1973         struct iwl_mvm_baid_data *baid_data = NULL;
1974         int ret;
1975         u32 status;
1976
1977         lockdep_assert_held(&mvm->mutex);
1978
1979         if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
1980                 IWL_WARN(mvm, "Not enough RX BA sessions\n");
1981                 return -ENOSPC;
1982         }
1983
1984         if (iwl_mvm_has_new_rx_api(mvm) && start) {
1985                 /*
1986                  * Allocate here so if allocation fails we can bail out early
1987                  * before starting the BA session in the firmware
1988                  */
1989                 baid_data = kzalloc(sizeof(*baid_data) +
1990                                     mvm->trans->num_rx_queues *
1991                                     sizeof(baid_data->reorder_buf[0]),
1992                                     GFP_KERNEL);
1993                 if (!baid_data)
1994                         return -ENOMEM;
1995         }
1996
1997         cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
1998         cmd.sta_id = mvm_sta->sta_id;
1999         cmd.add_modify = STA_MODE_MODIFY;
2000         if (start) {
2001                 cmd.add_immediate_ba_tid = (u8) tid;
2002                 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2003                 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
2004         } else {
2005                 cmd.remove_immediate_ba_tid = (u8) tid;
2006         }
2007         cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2008                                   STA_MODIFY_REMOVE_BA_TID;
2009
2010         status = ADD_STA_SUCCESS;
2011         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2012                                           iwl_mvm_add_sta_cmd_size(mvm),
2013                                           &cmd, &status);
2014         if (ret)
2015                 goto out_free;
2016
2017         switch (status & IWL_ADD_STA_STATUS_MASK) {
2018         case ADD_STA_SUCCESS:
2019                 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2020                              start ? "start" : "stopp");
2021                 break;
2022         case ADD_STA_IMMEDIATE_BA_FAILURE:
2023                 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2024                 ret = -ENOSPC;
2025                 break;
2026         default:
2027                 ret = -EIO;
2028                 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2029                         start ? "start" : "stopp", status);
2030                 break;
2031         }
2032
2033         if (ret)
2034                 goto out_free;
2035
2036         if (start) {
2037                 u8 baid;
2038
2039                 mvm->rx_ba_sessions++;
2040
2041                 if (!iwl_mvm_has_new_rx_api(mvm))
2042                         return 0;
2043
2044                 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2045                         ret = -EINVAL;
2046                         goto out_free;
2047                 }
2048                 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2049                             IWL_ADD_STA_BAID_SHIFT);
2050                 baid_data->baid = baid;
2051                 baid_data->timeout = timeout;
2052                 baid_data->last_rx = jiffies;
2053                 setup_timer(&baid_data->session_timer,
2054                             iwl_mvm_rx_agg_session_expired,
2055                             (unsigned long)&mvm->baid_map[baid]);
2056                 baid_data->mvm = mvm;
2057                 baid_data->tid = tid;
2058                 baid_data->sta_id = mvm_sta->sta_id;
2059
2060                 mvm_sta->tid_to_baid[tid] = baid;
2061                 if (timeout)
2062                         mod_timer(&baid_data->session_timer,
2063                                   TU_TO_EXP_TIME(timeout * 2));
2064
2065                 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2066                                             baid_data, ssn, buf_size);
2067                 /*
2068                  * protect the BA data with RCU to cover a case where our
2069                  * internal RX sync mechanism times out (not that it's
2070                  * supposed to happen) and we would free the session data while
2071                  * RX is being processed in parallel
2072                  */
2073                 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2074                              mvm_sta->sta_id, tid, baid);
2075                 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2076                 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2077         } else  {
2078                 u8 baid = mvm_sta->tid_to_baid[tid];
2079
2080                 if (mvm->rx_ba_sessions > 0)
2081                         /* check that restart flow didn't zero the counter */
2082                         mvm->rx_ba_sessions--;
2083                 if (!iwl_mvm_has_new_rx_api(mvm))
2084                         return 0;
2085
2086                 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2087                         return -EINVAL;
2088
2089                 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2090                 if (WARN_ON(!baid_data))
2091                         return -EINVAL;
2092
2093                 /* synchronize all rx queues so we can safely delete */
2094                 iwl_mvm_free_reorder(mvm, baid_data);
2095                 del_timer_sync(&baid_data->session_timer);
2096                 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2097                 kfree_rcu(baid_data, rcu_head);
2098                 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2099         }
2100         return 0;
2101
2102 out_free:
2103         kfree(baid_data);
2104         return ret;
2105 }
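
/*
 * Illustrative sketch only: RX BA sessions are driven from mac80211's
 * ampdu_action callback (implemented in mvm/mac80211.c); the dispatch is
 * expected to look roughly like this.
 */
static int example_ampdu_rx_action(struct iwl_mvm *mvm,
                                   struct ieee80211_ampdu_params *params)
{
        switch (params->action) {
        case IEEE80211_AMPDU_RX_START:
                return iwl_mvm_sta_rx_agg(mvm, params->sta, params->tid,
                                          params->ssn, true,
                                          params->buf_size, params->timeout);
        case IEEE80211_AMPDU_RX_STOP:
                return iwl_mvm_sta_rx_agg(mvm, params->sta, params->tid,
                                          0, false,
                                          params->buf_size, params->timeout);
        default:
                return -EINVAL;
        }
}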
2106
2107 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2108                        int tid, u8 queue, bool start)
2109 {
2110         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2111         struct iwl_mvm_add_sta_cmd cmd = {};
2112         int ret;
2113         u32 status;
2114
2115         lockdep_assert_held(&mvm->mutex);
2116
2117         if (start) {
2118                 mvm_sta->tfd_queue_msk |= BIT(queue);
2119                 mvm_sta->tid_disable_agg &= ~BIT(tid);
2120         } else {
2121                 /* In DQA-mode the queue isn't removed on agg termination */
2122                 if (!iwl_mvm_is_dqa_supported(mvm))
2123                         mvm_sta->tfd_queue_msk &= ~BIT(queue);
2124                 mvm_sta->tid_disable_agg |= BIT(tid);
2125         }
2126
2127         cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2128         cmd.sta_id = mvm_sta->sta_id;
2129         cmd.add_modify = STA_MODE_MODIFY;
2130         cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
2131         cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2132         cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2133
2134         status = ADD_STA_SUCCESS;
2135         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2136                                           iwl_mvm_add_sta_cmd_size(mvm),
2137                                           &cmd, &status);
2138         if (ret)
2139                 return ret;
2140
2141         switch (status & IWL_ADD_STA_STATUS_MASK) {
2142         case ADD_STA_SUCCESS:
2143                 break;
2144         default:
2145                 ret = -EIO;
2146                 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2147                         start ? "start" : "stopp", status);
2148                 break;
2149         }
2150
2151         return ret;
2152 }
2153
2154 const u8 tid_to_mac80211_ac[] = {
2155         IEEE80211_AC_BE,
2156         IEEE80211_AC_BK,
2157         IEEE80211_AC_BK,
2158         IEEE80211_AC_BE,
2159         IEEE80211_AC_VI,
2160         IEEE80211_AC_VI,
2161         IEEE80211_AC_VO,
2162         IEEE80211_AC_VO,
2163         IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2164 };
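
/*
 * Example of how the table above is consumed (see e.g.
 * iwl_mvm_sta_tx_agg_oper() below): a TID is first mapped to a mac80211
 * AC, and the AC is then mapped to a TX FIFO.
 */
static inline u8 example_tid_to_tx_fifo(int tid)
{
        return iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
}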
2165
2166 static const u8 tid_to_ucode_ac[] = {
2167         AC_BE,
2168         AC_BK,
2169         AC_BK,
2170         AC_BE,
2171         AC_VI,
2172         AC_VI,
2173         AC_VO,
2174         AC_VO,
2175 };
2176
2177 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2178                              struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2179 {
2180         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2181         struct iwl_mvm_tid_data *tid_data;
2182         int txq_id;
2183         int ret;
2184
2185         if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2186                 return -EINVAL;
2187
2188         if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2189                 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2190                         mvmsta->tid_data[tid].state);
2191                 return -ENXIO;
2192         }
2193
2194         lockdep_assert_held(&mvm->mutex);
2195
2196         spin_lock_bh(&mvmsta->lock);
2197
2198         /* possible race condition - we entered D0i3 while starting agg */
2199         if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2200                 spin_unlock_bh(&mvmsta->lock);
2201                 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2202                 return -EIO;
2203         }
2204
2205         spin_lock(&mvm->queue_info_lock);
2206
2207         /*
2208          * Note the possible cases:
2209          *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2210          *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2211          *      one and mark it as reserved
2212          *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
2213          *      non-DQA mode, since the TXQ hasn't yet been allocated
2214          */
2215         txq_id = mvmsta->tid_data[tid].txq_id;
2216         if (iwl_mvm_is_dqa_supported(mvm) &&
2217             unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2218                 ret = -ENXIO;
2219                 IWL_DEBUG_TX_QUEUES(mvm,
2220                                     "Can't start tid %d agg on shared queue!\n",
2221                                     tid);
2222                 goto release_locks;
2223         } else if (!iwl_mvm_is_dqa_supported(mvm) ||
2224             mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
2225                 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2226                                                  mvm->first_agg_queue,
2227                                                  mvm->last_agg_queue);
2228                 if (txq_id < 0) {
2229                         ret = txq_id;
2230                         IWL_ERR(mvm, "Failed to allocate agg queue\n");
2231                         goto release_locks;
2232                 }
2233
2234                 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2235                 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2236         }
2237
2238         spin_unlock(&mvm->queue_info_lock);
2239
2240         IWL_DEBUG_TX_QUEUES(mvm,
2241                             "AGG for tid %d will be on queue #%d\n",
2242                             tid, txq_id);
2243
2244         tid_data = &mvmsta->tid_data[tid];
2245         tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2246         tid_data->txq_id = txq_id;
2247         *ssn = tid_data->ssn;
2248
2249         IWL_DEBUG_TX_QUEUES(mvm,
2250                             "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2251                             mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2252                             tid_data->next_reclaimed);
2253
2254         if (tid_data->ssn == tid_data->next_reclaimed) {
2255                 tid_data->state = IWL_AGG_STARTING;
2256                 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2257         } else {
2258                 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2259         }
2260
2261         ret = 0;
2262         goto out;
2263
2264 release_locks:
2265         spin_unlock(&mvm->queue_info_lock);
2266 out:
2267         spin_unlock_bh(&mvmsta->lock);
2268
2269         return ret;
2270 }
2271
2272 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2273                             struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2274                             bool amsdu)
2275 {
2276         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2277         struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2278         unsigned int wdg_timeout =
2279                 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2280         int queue, ret;
2281         bool alloc_queue = true;
2282         enum iwl_mvm_queue_status queue_status;
2283         u16 ssn;
2284
2285         struct iwl_trans_txq_scd_cfg cfg = {
2286                 .sta_id = mvmsta->sta_id,
2287                 .tid = tid,
2288                 .frame_limit = buf_size,
2289                 .aggregate = true,
2290         };
2291
2292         BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2293                      != IWL_MAX_TID_COUNT);
2294
2295         buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2296
2297         spin_lock_bh(&mvmsta->lock);
2298         ssn = tid_data->ssn;
2299         queue = tid_data->txq_id;
2300         tid_data->state = IWL_AGG_ON;
2301         mvmsta->agg_tids |= BIT(tid);
2302         tid_data->ssn = 0xffff;
2303         tid_data->amsdu_in_ampdu_allowed = amsdu;
2304         spin_unlock_bh(&mvmsta->lock);
2305
2306         cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2307
2308         spin_lock_bh(&mvm->queue_info_lock);
2309         queue_status = mvm->queue_info[queue].status;
2310         spin_unlock_bh(&mvm->queue_info_lock);
2311
2312         /* In DQA mode, the existing queue might need to be reconfigured */
2313         if (iwl_mvm_is_dqa_supported(mvm)) {
2314                 /* Maybe there is no need to even alloc a queue... */
2315                 if (queue_status == IWL_MVM_QUEUE_READY)
2316                         alloc_queue = false;
2317
2318                 /*
2319                  * Only reconfig the SCD for the queue if the window size has
2320                  * changed from current (become smaller)
2321                  */
2322                 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2323                         /*
2324                          * If reconfiguring an existing queue, it first must be
2325                          * drained
2326                          */
2327                         ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2328                                                             BIT(queue));
2329                         if (ret) {
2330                                 IWL_ERR(mvm,
2331                                         "Error draining queue before reconfig\n");
2332                                 return ret;
2333                         }
2334
2335                         ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2336                                                    mvmsta->sta_id, tid,
2337                                                    buf_size, ssn);
2338                         if (ret) {
2339                                 IWL_ERR(mvm,
2340                                         "Error reconfiguring TXQ #%d\n", queue);
2341                                 return ret;
2342                         }
2343                 }
2344         }
2345
2346         if (alloc_queue)
2347                 iwl_mvm_enable_txq(mvm, queue,
2348                                    vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2349                                    &cfg, wdg_timeout);
2350
2351         /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2352         if (queue_status != IWL_MVM_QUEUE_SHARED) {
2353                 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2354                 if (ret)
2355                         return -EIO;
2356         }
2357
2358         /* No need to mark as reserved */
2359         spin_lock_bh(&mvm->queue_info_lock);
2360         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2361         spin_unlock_bh(&mvm->queue_info_lock);
2362
2363         /*
2364          * Even though in theory the peer could have different
2365          * aggregation reorder buffer sizes for different sessions,
2366          * our ucode doesn't allow for that and has a global limit
2367          * for each station. Therefore, use the minimum of all the
2368          * aggregation sessions and our default value.
2369          */
2370         mvmsta->max_agg_bufsize =
2371                 min(mvmsta->max_agg_bufsize, buf_size);
2372         mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2373
2374         IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2375                      sta->addr, tid);
2376
2377         return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
2378 }
2379
2380 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2381                             struct ieee80211_sta *sta, u16 tid)
2382 {
2383         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2384         struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2385         u16 txq_id;
2386         int err;
2387
2388         /*
2389          * If mac80211 is cleaning its state, then say that we finished since
2390          * our state has been cleared anyway.
2391          */
2392         if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2393                 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2394                 return 0;
2395         }
2396
2397         spin_lock_bh(&mvmsta->lock);
2398
2399         txq_id = tid_data->txq_id;
2400
2401         IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2402                             mvmsta->sta_id, tid, txq_id, tid_data->state);
2403
2404         mvmsta->agg_tids &= ~BIT(tid);
2405
2406         spin_lock_bh(&mvm->queue_info_lock);
2407         /*
2408          * The TXQ is marked as reserved only if no traffic came through yet.
2409          * This means no traffic has been sent on this TID (agg'd or not), so
2410          * we no longer have use for the queue. It hasn't even been allocated
2411          * through iwl_mvm_enable_txq, so we can just mark it back as free.
2413          */
2414         if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2415                 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2416
2417         spin_unlock_bh(&mvm->queue_info_lock);
2418
2419         switch (tid_data->state) {
2420         case IWL_AGG_ON:
2421                 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2422
2423                 IWL_DEBUG_TX_QUEUES(mvm,
2424                                     "ssn = %d, next_recl = %d\n",
2425                                     tid_data->ssn, tid_data->next_reclaimed);
2426
2427                 /* There are still packets for this RA / TID in the HW */
2428                 if (tid_data->ssn != tid_data->next_reclaimed) {
2429                         tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2430                         err = 0;
2431                         break;
2432                 }
2433
2434                 tid_data->ssn = 0xffff;
2435                 tid_data->state = IWL_AGG_OFF;
2436                 spin_unlock_bh(&mvmsta->lock);
2437
2438                 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2439
2440                 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2441
2442                 if (!iwl_mvm_is_dqa_supported(mvm)) {
2443                         int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2444
2445                         iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2446                 }
2447                 return 0;
2448         case IWL_AGG_STARTING:
2449         case IWL_EMPTYING_HW_QUEUE_ADDBA:
2450                 /*
2451                  * The agg session has been stopped before it was set up. This
2452                  * can happen when the AddBA timer times out, for example.
2453                  */
2454
2455                 /* No barriers since we are under mutex */
2456                 lockdep_assert_held(&mvm->mutex);
2457
2458                 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2459                 tid_data->state = IWL_AGG_OFF;
2460                 err = 0;
2461                 break;
2462         default:
2463                 IWL_ERR(mvm,
2464                         "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2465                         mvmsta->sta_id, tid, tid_data->state);
2466                 IWL_ERR(mvm,
2467                         "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2468                 err = -EINVAL;
2469         }
2470
2471         spin_unlock_bh(&mvmsta->lock);
2472
2473         return err;
2474 }
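
/*
 * Illustrative sketch only: the TX aggregation handshake ties the three
 * functions above together via mac80211's ampdu_action callback
 * (mvm/mac80211.c), roughly as follows.
 */
static int example_ampdu_tx_action(struct iwl_mvm *mvm,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_ampdu_params *params)
{
        switch (params->action) {
        case IEEE80211_AMPDU_TX_START:
                /* reserve a queue; mac80211 then sends the ADDBA request */
                return iwl_mvm_sta_tx_agg_start(mvm, vif, params->sta,
                                                params->tid, &params->ssn);
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                /* ADDBA response accepted - switch the queue to agg mode */
                return iwl_mvm_sta_tx_agg_oper(mvm, vif, params->sta,
                                               params->tid, params->buf_size,
                                               params->amsdu);
        case IEEE80211_AMPDU_TX_STOP_CONT:
                return iwl_mvm_sta_tx_agg_stop(mvm, vif, params->sta,
                                               params->tid);
        default:
                return -EINVAL;
        }
}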
2475
2476 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2477                             struct ieee80211_sta *sta, u16 tid)
2478 {
2479         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2480         struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2481         u16 txq_id;
2482         enum iwl_mvm_agg_state old_state;
2483
2484         /*
2485          * First set the agg state to OFF to avoid calling
2486          * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2487          */
2488         spin_lock_bh(&mvmsta->lock);
2489         txq_id = tid_data->txq_id;
2490         IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2491                             mvmsta->sta_id, tid, txq_id, tid_data->state);
2492         old_state = tid_data->state;
2493         tid_data->state = IWL_AGG_OFF;
2494         mvmsta->agg_tids &= ~BIT(tid);
2495         spin_unlock_bh(&mvmsta->lock);
2496
2497         spin_lock_bh(&mvm->queue_info_lock);
2498         /*
2499          * The TXQ is marked as reserved only if no traffic came through yet.
2500          * This means no traffic has been sent on this TID (agg'd or not), so
2501          * we no longer have use for the queue. It hasn't even been allocated
2502          * through iwl_mvm_enable_txq, so we can just mark it back as free.
2504          */
2505         if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2506                 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2507         spin_unlock_bh(&mvm->queue_info_lock);
2508
2509         if (old_state >= IWL_AGG_ON) {
2510                 iwl_mvm_drain_sta(mvm, mvmsta, true);
2511                 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2512                         IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2513                 iwl_trans_wait_tx_queue_empty(mvm->trans,
2514                                               mvmsta->tfd_queue_msk);
2515                 iwl_mvm_drain_sta(mvm, mvmsta, false);
2516
2517                 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2518
2519                 if (!iwl_mvm_is_dqa_supported(mvm)) {
2520                         int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2521
2522                         iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2523                                             tid, 0);
2524                 }
2525         }
2526
2527         return 0;
2528 }
2529
2530 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2531 {
2532         int i, max = -1, max_offs = -1;
2533
2534         lockdep_assert_held(&mvm->mutex);
2535
2536         /* Pick the unused key offset with the highest 'deleted'
2537          * counter. Every time a key is deleted, all the counters
2538          * are incremented and the one that was just deleted is
2539          * reset to zero. Thus, the highest counter is the one
2540          * that was deleted longest ago. Pick that one.
2541          */
2542         for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2543                 if (test_bit(i, mvm->fw_key_table))
2544                         continue;
2545                 if (mvm->fw_key_deleted[i] > max) {
2546                         max = mvm->fw_key_deleted[i];
2547                         max_offs = i;
2548                 }
2549         }
2550
2551         if (max_offs < 0)
2552                 return STA_KEY_IDX_INVALID;
2553
2554         return max_offs;
2555 }
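
/*
 * Illustrative counterpart, shown as an assumption (the real update
 * happens on the key-removal path, outside this excerpt): freeing a key
 * offset is expected to age every 'deleted' counter and zero the slot
 * that was just freed, which is what lets iwl_mvm_set_fw_key_idx() above
 * pick the offset that has been unused the longest.
 */
static void example_age_fw_key_offsets(struct iwl_mvm *mvm, int freed_offs)
{
        int i;

        for (i = 0; i < STA_KEY_MAX_NUM; i++)
                mvm->fw_key_deleted[i]++;
        mvm->fw_key_deleted[freed_offs] = 0;
}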
2556
2557 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2558                                                struct ieee80211_vif *vif,
2559                                                struct ieee80211_sta *sta)
2560 {
2561         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2562
2563         if (sta)
2564                 return iwl_mvm_sta_from_mac80211(sta);
2565
2566         /*
2567          * The device expects GTKs for station interfaces to be
2568          * installed as GTKs for the AP station. If we have no
2569          * station ID, then use AP's station ID.
2570          */
2571         if (vif->type == NL80211_IFTYPE_STATION &&
2572             mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2573                 u8 sta_id = mvmvif->ap_sta_id;
2574
2575                 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2576                                             lockdep_is_held(&mvm->mutex));
2577
2578                 /*
2579                  * It is possible that the 'sta' parameter is NULL,
2580                  * for example when a GTK is removed - the sta_id will then
2581                  * be the AP ID, and no station was passed by mac80211.
2582                  */
2583                 if (IS_ERR_OR_NULL(sta))
2584                         return NULL;
2585
2586                 return iwl_mvm_sta_from_mac80211(sta);
2587         }
2588
2589         return NULL;
2590 }
2591
2592 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2593                                 struct iwl_mvm_sta *mvm_sta,
2594                                 struct ieee80211_key_conf *keyconf, bool mcast,
2595                                 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2596                                 u8 key_offset)
2597 {
2598         struct iwl_mvm_add_sta_key_cmd cmd = {};
2599         __le16 key_flags;
2600         int ret;
2601         u32 status;
2602         u16 keyidx;
2603         int i;
2604         u8 sta_id = mvm_sta->sta_id;
2605
2606         keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2607                  STA_KEY_FLG_KEYID_MSK;
2608         key_flags = cpu_to_le16(keyidx);
2609         key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2610
2611         switch (keyconf->cipher) {
2612         case WLAN_CIPHER_SUITE_TKIP:
2613                 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
2614                 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
2615                 for (i = 0; i < 5; i++)
2616                         cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
2617                 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2618                 break;
2619         case WLAN_CIPHER_SUITE_CCMP:
2620                 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
2621                 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2622                 break;
2623         case WLAN_CIPHER_SUITE_WEP104:
2624                 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
2625                 /* fall through */
2626         case WLAN_CIPHER_SUITE_WEP40:
2627                 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2628                 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
2629                 break;
2630         case WLAN_CIPHER_SUITE_GCMP_256:
2631                 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2632                 /* fall through */
2633         case WLAN_CIPHER_SUITE_GCMP:
2634                 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2635                 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2636                 break;
2637         default:
2638                 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2639                 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2640         }
2641
2642         if (mcast)
2643                 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2644
2645         cmd.key_offset = key_offset;
2646         cmd.key_flags = key_flags;
2647         cmd.sta_id = sta_id;
2648
2649         status = ADD_STA_SUCCESS;
2650         if (cmd_flags & CMD_ASYNC)
2651                 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
2652                                             sizeof(cmd), &cmd);
2653         else
2654                 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2655                                                   &cmd, &status);
2656
2657         switch (status) {
2658         case ADD_STA_SUCCESS:
2659                 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2660                 break;
2661         default:
2662                 ret = -EIO;
2663                 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2664                 break;
2665         }
2666
2667         return ret;
2668 }

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                                 struct ieee80211_key_conf *keyconf,
                                 u8 sta_id, bool remove_key)
{
        struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

        /* verify the key details match the required command's expectations */
        if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
                    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
                    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
                return -EINVAL;

        if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
                    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
                return -EINVAL;

        igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
        igtk_cmd.sta_id = cpu_to_le32(sta_id);

        if (remove_key) {
                igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
        } else {
                struct ieee80211_key_seq seq;
                const u8 *pn;

                switch (keyconf->cipher) {
                case WLAN_CIPHER_SUITE_AES_CMAC:
                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
                        break;
                case WLAN_CIPHER_SUITE_BIP_GMAC_128:
                case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
                        break;
                default:
                        return -EINVAL;
                }

                memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
                if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
                        igtk_cmd.ctrl_flags |=
                                cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
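                /*
                 * mac80211 stores the IPN with pn[0] as the most
                 * significant byte; fold the six bytes into the u64
                 * the firmware expects, least significant byte first.
                 */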
                igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
                                                       ((u64) pn[4] << 8) |
                                                       ((u64) pn[3] << 16) |
                                                       ((u64) pn[2] << 24) |
                                                       ((u64) pn[1] << 32) |
                                                       ((u64) pn[0] << 40));
        }

        IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
                       remove_key ? "removing" : "installing",
                       le32_to_cpu(igtk_cmd.sta_id));

        if (!iwl_mvm_has_new_rx_api(mvm)) {
                struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
                        .ctrl_flags = igtk_cmd.ctrl_flags,
                        .key_id = igtk_cmd.key_id,
                        .sta_id = igtk_cmd.sta_id,
                        .receive_seq_cnt = igtk_cmd.receive_seq_cnt
                };

                memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
                       sizeof(igtk_cmd_v1.igtk));
                return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
                                            sizeof(igtk_cmd_v1), &igtk_cmd_v1);
        }
        return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
                                    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        if (sta)
                return sta->addr;

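        /*
         * For a station interface the transmitter of group-addressed
         * frames is the AP, so when no station was given (group keys)
         * fall back to the AP station's address.
         */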
        if (vif->type == NL80211_IFTYPE_STATION &&
            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                u8 sta_id = mvmvif->ap_sta_id;

                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                return sta->addr;
        }

        return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
                                 struct ieee80211_key_conf *keyconf,
                                 u8 key_offset,
                                 bool mcast)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        int ret;
        const u8 *addr;
        struct ieee80211_key_seq seq;
        u16 p1k[5];

        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
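                /*
                 * The firmware only runs TKIP phase 2; the driver
                 * derives the phase-1 key (TTAK) for the current IV32
                 * here and refreshes it from iwl_mvm_update_tkip_key()
                 * whenever IV32 changes.
                 */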
                addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
                /* get phase 1 key from mac80211 */
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
                                           seq.tkip.iv32, p1k, 0, key_offset);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
                                           0, NULL, 0, key_offset);
                break;
        default:
                ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
                                           0, NULL, 0, key_offset);
        }

        return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
                                    struct ieee80211_key_conf *keyconf,
                                    bool mcast)
{
        struct iwl_mvm_add_sta_key_cmd cmd = {};
        __le16 key_flags;
        int ret;
        u32 status;

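        /*
         * Build a removal command: keep the key index and multicast
         * bits so the firmware knows which slot is meant, and mark the
         * key as not valid to actually clear it.
         */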
        key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
                                 STA_KEY_FLG_KEYID_MSK);
        key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
        key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

        cmd.key_flags = key_flags;
        cmd.key_offset = keyconf->hw_key_idx;
        cmd.sta_id = sta_id;

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
                                          &cmd, &status);

        switch (status) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
                break;
        }

        return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
                        struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta,
                        struct ieee80211_key_conf *keyconf,
                        u8 key_offset)
{
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        struct iwl_mvm_sta *mvm_sta;
        u8 sta_id;
        int ret;
        static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

        lockdep_assert_held(&mvm->mutex);

        /* Get the station id from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
        if (!mvm_sta) {
                IWL_ERR(mvm, "Failed to find station\n");
                return -EINVAL;
        }
        sta_id = mvm_sta->sta_id;

        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
                ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
                goto end;
        }

        /*
         * It is possible that the 'sta' parameter is NULL, in which case
         * we need to retrieve the station from the local station table.
         */
        if (!sta) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta)) {
                        IWL_ERR(mvm, "Invalid station id\n");
                        return -EINVAL;
                }
        }

        if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
                return -EINVAL;

        /* If the key_offset is not pre-assigned, we need to find a
         * new offset to use.  In normal cases, the offset is not
         * pre-assigned, but during HW_RESTART we want to reuse the
         * same indices, so we pass them when this function is called.
         *
         * In D3 entry, we need to use hardcoded indices (because the
         * firmware hardcodes the PTK offset to 0).  In this case, we
         * need to make sure we don't overwrite the hw_key_idx in the
         * keyconf structure, because otherwise we cannot configure
         * the original ones back when resuming.
         */
        if (key_offset == STA_KEY_IDX_INVALID) {
                key_offset = iwl_mvm_set_fw_key_idx(mvm);
                if (key_offset == STA_KEY_IDX_INVALID)
                        return -ENOSPC;
                keyconf->hw_key_idx = key_offset;
        }

        ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
        if (ret)
                goto end;

        /*
         * For WEP, the same key is used for multicast and unicast. Upload it
         * again, using the same key offset, and now pointing the other one
         * to the same key slot (offset).
         * If this fails, remove the original as well.
         */
        if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
                ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
                                            key_offset, !mcast);
                if (ret) {
                        __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
                        goto end;
                }
        }

        __set_bit(key_offset, mvm->fw_key_table);

end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
                      sta ? sta->addr : zero_addr, ret);
        return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
                           struct ieee80211_key_conf *keyconf)
{
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        struct iwl_mvm_sta *mvm_sta;
        u8 sta_id = IWL_MVM_STATION_COUNT;
        int ret, i;

        lockdep_assert_held(&mvm->mutex);

        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
        if (mvm_sta)
                sta_id = mvm_sta->sta_id;

        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);

        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
                IWL_ERR(mvm, "offset %d not used in fw key table.\n",
                        keyconf->hw_key_idx);
                return -ENOENT;
        }

        /* Track which key offset was deleted last: age every slot, then
         * reset the one just freed.  iwl_mvm_set_fw_key_idx() picks the
         * free offset with the highest count, i.e. the one that was
         * freed longest ago, so an offset is not reused immediately.
         */
        for (i = 0; i < STA_KEY_MAX_NUM; i++) {
                if (mvm->fw_key_deleted[i] < U8_MAX)
                        mvm->fw_key_deleted[i]++;
        }
        mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

        if (!mvm_sta) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
        }

        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
        if (ret)
                return ret;

        /* delete WEP key twice to get rid of (now useless) offset */
        if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
                ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

        return ret;
}

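/*
 * Called via mac80211's update_tkip_key op whenever the IV32 of a TKIP
 * key changes: re-derive the phase-1 key for the new IV32 and push it
 * to the firmware.  This can run in atomic (RX) context, hence the RCU
 * read lock and CMD_ASYNC instead of taking mvm->mutex.
 */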
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             struct ieee80211_key_conf *keyconf,
                             struct ieee80211_sta *sta, u32 iv32,
                             u16 *phase1key)
{
        struct iwl_mvm_sta *mvm_sta;
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

        rcu_read_lock();

        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
        if (WARN_ON_ONCE(!mvm_sta))
                goto unlock;
        iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
                             iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
        rcu_read_unlock();
}

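/*
 * Tell the firmware a station woke up: clear STA_FLG_PS (the flag is
 * covered by the mask while station_flags stays zero) so transmission
 * to the station may resume.  Sent with CMD_ASYNC since this can be
 * called from the RX path.
 */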
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
        };
        int ret;

        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
                                       bool agg)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
                .sleep_tx_count = cpu_to_le16(cnt),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
        };
        int tid, ret;
        unsigned long _tids = tids;

        /* Convert TIDs to ACs - we don't support TSPEC so that's OK.
         * Note that this field is reserved and unused by firmware not
         * supporting GO uAPSD, so it's safe to always do this.
         */
        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
                cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

        /* If we're releasing frames from aggregation queues then check if
         * all the queues combined that we're releasing frames from have
         *  - more frames than the service period, in which case more_data
         *    needs to be set
         *  - fewer than 'cnt' frames, in which case we need to adjust the
         *    firmware command (but do that unconditionally)
         */
        if (agg) {
                int remaining = cnt;
                int sleep_tx_count;

                spin_lock_bh(&mvmsta->lock);
                for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
                        struct iwl_mvm_tid_data *tid_data;
                        u16 n_queued;

                        tid_data = &mvmsta->tid_data[tid];
                        if (WARN(tid_data->state != IWL_AGG_ON &&
                                 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
                                 "TID %d state is %d\n",
                                 tid, tid_data->state)) {
                                spin_unlock_bh(&mvmsta->lock);
                                ieee80211_sta_eosp(sta);
                                return;
                        }

                        n_queued = iwl_mvm_tid_queued(tid_data);
                        if (n_queued > remaining) {
                                more_data = true;
                                remaining = 0;
                                break;
                        }
                        remaining -= n_queued;
                }
                sleep_tx_count = cnt - remaining;
                if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
                        mvmsta->sleep_tx_count = sleep_tx_count;
                spin_unlock_bh(&mvmsta->lock);

                cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
                if (WARN_ON(sleep_tx_count == 0)) {
                        ieee80211_sta_eosp(sta);
                        return;
                }
        }

        /* Note: this is ignored by firmware not supporting GO uAPSD */
        if (more_data)
                cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

        if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
                mvmsta->next_status_eosp = true;
                cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
        } else {
                cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
        }

        /* block the Tx queues until the FW updated the sleep Tx count */
        iwl_trans_block_txq_ptrs(mvm->trans, true);

        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
                                   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

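/*
 * The firmware notifies us here that it finished releasing the frames
 * for a service period; forward the end-of-service-period event to
 * mac80211.
 */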
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
                           struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
        struct ieee80211_sta *sta;
        u32 sta_id = le32_to_cpu(notif->sta_id);

        if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
                return;

        rcu_read_lock();
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
        if (!IS_ERR_OR_NULL(sta))
                ieee80211_sta_eosp(sta);
        rcu_read_unlock();
}

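/*
 * Set or clear STA_FLG_DISABLE_TX for a single station; while the flag
 * is set, the firmware will not transmit any frames to this station.
 */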
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
                                   struct iwl_mvm_sta *mvmsta, bool disable)
{
        struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
        };
        int ret;

        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      bool disable)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvm_sta->lock);

        if (mvm_sta->disable_tx == disable) {
                spin_unlock_bh(&mvm_sta->lock);
                return;
        }

        mvm_sta->disable_tx = disable;

        /*
         * Tell mac80211 to start/stop queuing tx for this station,
         * but don't stop queuing if there are still pending frames
         * for this station.
         */
        if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
                ieee80211_sta_block_awake(mvm->hw, sta, disable);

        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

        spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
                                       struct iwl_mvm_vif *mvmvif,
                                       bool disable)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        int i;

        lockdep_assert_held(&mvm->mutex);

        /* Block/unblock all the stations of the given mvmvif */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvm_sta = iwl_mvm_sta_from_mac80211(sta);
                if (mvm_sta->mac_id_n_color !=
                    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
                        continue;

                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
        }
}

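/*
 * During a channel switch the peer tracked in ap_sta_id is absent from
 * the current channel, so disable all Tx to it until the switch
 * completes.
 */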
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta;

        rcu_read_lock();

        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

        if (!WARN_ON(!mvmsta))
                iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

        rcu_read_unlock();
}