3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
/* Number of chip boot attempts before declaring failure (see wl1271_plt_start). */
44 #define WL1271_BOOT_RETRIES 3
/*
 * Module parameters. -1 (or NULL for fwlog_param) means "not set on the
 * command line"; wlcore_adjust_conf() folds any set values into wl->conf.
 */
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the firmware that the STA link is fully authorized.
 *
 * Only valid for a STA-role vif that is currently associated; the
 * WLVIF_FLAG_STA_STATE_SENT bit guarantees the peer-state command is
 * issued at most once per association.
 * NOTE(review): some statements of this function are not visible in this
 * excerpt (early returns / error handling presumably sit between the
 * guards) — confirm against the full file.
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* Programming error if called on a non-STA role. */
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* Already sent the peer state once — nothing to do. */
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: mark every enabled 5 GHz radar channel
 * as no-initiate-radiation (NO_IR), then push the updated regulatory
 * domain to the firmware.
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
/* Only the 5 GHz band can carry radar channels. */
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
/* Hand the new channel configuration to the firmware. */
98 wlcore_regdomain_config(wl);
/*
 * Enable/disable RX streaming in the firmware via ACX and mirror the
 * result in WLVIF_FLAG_RX_STREAMING_STARTED. Caller must hold wl->mutex.
 */
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* Track the firmware-side state in the vif flags. */
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
126 int period = wl->conf.rx_streaming.interval;
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/*
 * Keep streaming only while associated and either always-on is
 * configured or soft-gemini (BT coex) is active; otherwise turn it off.
 */
132 /* reconfigure/disable according to new streaming_period */
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif and arm the inactivity
 * timer that will later queue the matching disable work.
 */
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
150 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 rx_streaming_enable_work);
152 struct wl1271 *wl = wlvif->wl;
154 mutex_lock(&wl->mutex);
/* Bail out if already started, not associated, or not wanted now. */
156 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 (!wl->conf.rx_streaming.always &&
159 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
162 if (!wl->conf.rx_streaming.interval)
/* Chip must be awake before sending the ACX command. */
165 ret = wl1271_ps_elp_wakeup(wl);
169 ret = wl1271_set_rx_streaming(wl, wlvif, true);
173 /* stop it after some time of inactivity */
174 mod_timer(&wlvif->rx_streaming_timer,
175 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
178 wl1271_ps_elp_sleep(wl);
180 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * inactivity timer below). No-op if streaming is not currently started.
 */
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
190 mutex_lock(&wl->mutex);
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
195 ret = wl1271_ps_elp_wakeup(wl);
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
204 wl1271_ps_elp_sleep(wl);
206 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: cannot sleep in timer context, so queue the
 * disable work instead of calling into the firmware directly.
 */
209 static void wl1271_rx_streaming_timer(unsigned long data)
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
/*
 * Re-arm the TX-stuck watchdog. Only armed while the FW still holds
 * allocated TX blocks; canceling first avoids doubling the timeout.
 */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires when no TX blocks were freed by the FW for a full
 * timeout period. Re-arms instead of recovering when there is a benign
 * explanation (ROC, scan, AP with sleeping stations); otherwise treats
 * TX as stuck and triggers full FW recovery.
 */
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 struct delayed_work *dwork;
233 dwork = container_of(work, struct delayed_work, work);
234 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 mutex_lock(&wl->mutex);
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 /* Tx went out in the meantime - everything is ok */
242 if (unlikely(wl->tx_allocated_blocks == 0))
246 * if a ROC is in progress, we might not have any Tx for a long
247 * time (e.g. pending Tx on the non-ROC channels)
249 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 wl->conf.tx.tx_watchdog_timeout);
252 wl12xx_rearm_tx_watchdog_locked(wl);
257 * if a scan is in progress, we might not have any Tx for a long
260 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 wl->conf.tx.tx_watchdog_timeout);
263 wl12xx_rearm_tx_watchdog_locked(wl);
268 * AP might cache a frame for a long time for a sleeping station,
269 * so rearm the timer if there's an AP interface with stations. If
270 * Tx is genuinely stuck we will most hopefully discover it when all
271 * stations are removed due to inactivity.
273 if (wl->active_sta_count) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
276 wl->conf.tx.tx_watchdog_timeout,
277 wl->active_sta_count);
278 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation left — assume the FW TX path is wedged. */
282 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_queue_recovery_work(wl);
287 mutex_unlock(&wl->mutex);
/*
 * Apply module-parameter overrides (fwlog settings, recovery behavior)
 * on top of the chip-default configuration in wl->conf.
 */
290 static void wlcore_adjust_conf(struct wl1271 *wl)
292 /* Adjust settings according to optional module parameters */
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
/* Accept only values within the FW's supported block range. */
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* fwlog_param selects the logger mode/output; unknown values are rejected. */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Adjust host-side (high-level) power save for one AP link based on the
 * FW's per-link PS bitmap and the number of packets queued for it.
 */
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 struct wl12xx_vif *wlvif,
/* Is the FW reporting this station asleep? */
335 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 * Wake up from high level PS if the STA is asleep with too little
339 * packets in FW or if the STA is awake.
341 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 wl12xx_ps_link_end(wl, wlvif, hlid);
345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 2*ap_count + 1 active links,
349 * since we must account for the global and broadcast AP links
350 * for each AP. The "fw_ps" check assures us the other link is a STA
351 * connected to the AP. Otherwise the FW would not set the PSM bit.
353 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * From a fresh FW status, refresh the cached per-link PS bitmap and
 * regulate PS for every station link of this AP vif.
 */
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status *status)
365 cur_fw_ps_map = status->link_ps_bitmap;
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 wl->ap_fw_ps_map = cur_fw_ps_map;
/* Re-evaluate high-level PS for each connected station link. */
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
/*
 * Read and decode the FW status area: update freed-packet and
 * freed-block counters (all of which can wrap), recompute available TX
 * blocks, re-arm or cancel the TX watchdog, update AP link PS state and
 * the host/chip time offset.
 */
380 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382 struct wl12xx_vif *wlvif;
384 u32 old_tx_blk_count = wl->tx_blocks_available;
385 int avail, freed_blocks;
388 struct wl1271_link *lnk;
/* Raw read of the FW status area, then chip-specific conversion. */
390 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
392 wl->fw_status_len, false);
396 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
398 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status->fw_rx_counter,
402 status->drv_rx_counter,
403 status->tx_results_counter);
405 for (i = 0; i < NUM_TX_QUEUES; i++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl->tx_allocated_pkts[i] -=
408 (status->counters.tx_released_pkts[i] -
409 wl->tx_pkts_freed[i]) & 0xff;
411 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* Same wrap-safe accounting, but per link instead of per queue. */
415 for_each_set_bit(i, wl->links_map, wl->num_links) {
419 /* prevent wrap-around in freed-packets counter */
420 diff = (status->counters.tx_lnk_free_pkts[i] -
421 lnk->prev_freed_pkts) & 0xff;
426 lnk->allocated_pkts -= diff;
427 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
429 /* accumulate the prev_freed_pkts counter */
430 lnk->total_freed_pkts += diff;
433 /* prevent wrap-around in total blocks counter */
434 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
435 freed_blocks = status->total_released_blks -
/* 32-bit counter wrapped: account for the wrap explicitly. */
438 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
439 status->total_released_blks;
441 wl->tx_blocks_freed = status->total_released_blks;
443 wl->tx_allocated_blocks -= freed_blocks;
446 * If the FW freed some blocks:
447 * If we still have allocated blocks - re-arm the timer, Tx is
448 * not stuck. Otherwise, cancel the timer (no Tx currently).
451 if (wl->tx_allocated_blocks)
452 wl12xx_rearm_tx_watchdog_locked(wl);
454 cancel_delayed_work(&wl->tx_watchdog_work);
457 avail = status->tx_total - wl->tx_allocated_blocks;
460 * The FW might change the total number of TX memblocks before
461 * we get a notification about blocks being released. Thus, the
462 * available blocks calculation might yield a temporary result
463 * which is lower than the actual available blocks. Keeping in
464 * mind that only blocks that were allocated can be moved from
465 * TX to RX, tx_blocks_available should never decrease here.
467 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
470 /* if more blocks are available now, tx work can be scheduled */
471 if (wl->tx_blocks_available > old_tx_blk_count)
472 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
474 /* for AP update num of allocated TX blocks per link and ps status */
475 wl12xx_for_each_wlvif_ap(wl, wlvif) {
476 wl12xx_irq_update_links_status(wl, wlvif, status);
479 /* update the host-chipset time offset */
481 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
482 (s64)(status->fw_localtime);
484 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred RX and TX-status queues into mac80211.
 * Uses the *_ni variants, so this must run in process context.
 */
489 static void wl1271_flush_deferred_work(struct wl1271 *wl)
493 /* Pass all received frames to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
495 ieee80211_rx_ni(wl->hw, skb);
497 /* Return sent skbs to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
499 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item wrapper around wl1271_flush_deferred_work(); loops until the
 * deferred RX queue stays empty (new frames may arrive while flushing).
 */
502 static void wl1271_netstack_work(struct work_struct *work)
505 container_of(work, struct wl1271, netstack_work);
508 wl1271_flush_deferred_work(wl);
509 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ servicing iterations, to avoid starving other work. */
512 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held from the
 * threaded IRQ handler. Reads FW status, handles data/event/watchdog
 * interrupt causes, and wakes/sleeps the chip around the work.
 */
514 static int wlcore_irq_locked(struct wl1271 *wl)
518 int loopcount = WL1271_IRQ_MAX_LOOPS;
520 unsigned int defer_count;
524 * In case edge triggered interrupt must be used, we cannot iterate
525 * more than once without introducing race conditions with the hardirq.
527 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
530 wl1271_debug(DEBUG_IRQ, "IRQ work");
532 if (unlikely(wl->state != WLCORE_STATE_ON))
535 ret = wl1271_ps_elp_wakeup(wl);
539 while (!done && loopcount--) {
541 * In order to avoid a race with the hardirq, clear the flag
542 * before acknowledging the chip. Since the mutex is held,
543 * wl1271_ps_elp_wakeup cannot be called concurrently.
545 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
546 smp_mb__after_atomic();
548 ret = wlcore_fw_status(wl, wl->fw_status);
552 wlcore_hw_tx_immediate_compl(wl);
/* Mask off interrupt causes we do not handle. */
554 intr = wl->fw_status->intr;
555 intr &= WLCORE_ALL_INTR_MASK;
561 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
562 wl1271_error("HW watchdog interrupt received! starting recovery.");
563 wl->watchdog_recovery = true;
566 /* restarting the chip. ignore any other interrupt. */
570 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
571 wl1271_error("SW watchdog interrupt received! "
572 "starting recovery.");
573 wl->watchdog_recovery = true;
576 /* restarting the chip. ignore any other interrupt. */
580 if (likely(intr & WL1271_ACX_INTR_DATA)) {
581 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 ret = wlcore_rx(wl, wl->fw_status);
587 /* Check if any tx blocks were freed */
588 spin_lock_irqsave(&wl->wl_lock, flags);
589 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
590 wl1271_tx_total_queue_count(wl) > 0) {
591 spin_unlock_irqrestore(&wl->wl_lock, flags);
593 * In order to avoid starvation of the TX path,
594 * call the work function directly.
596 ret = wlcore_tx_work_locked(wl);
600 spin_unlock_irqrestore(&wl->wl_lock, flags);
603 /* check for tx results */
604 ret = wlcore_hw_tx_delayed_compl(wl);
608 /* Make sure the deferred queues don't get too long */
609 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
610 skb_queue_len(&wl->deferred_rx_queue);
611 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
612 wl1271_flush_deferred_work(wl);
615 if (intr & WL1271_ACX_INTR_EVENT_A) {
616 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
617 ret = wl1271_event_handle(wl, 0);
622 if (intr & WL1271_ACX_INTR_EVENT_B) {
623 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
624 ret = wl1271_event_handle(wl, 1);
629 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
630 wl1271_debug(DEBUG_IRQ,
631 "WL1271_ACX_INTR_INIT_COMPLETE");
633 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
634 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
637 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers work if
 * we are suspended, otherwise services interrupts under wl->mutex and
 * queues TX work for anything not handled inline.
 */
643 static irqreturn_t wlcore_irq(int irq, void *cookie)
647 struct wl1271 *wl = cookie;
649 /* complete the ELP completion */
650 spin_lock_irqsave(&wl->wl_lock, flags);
651 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
653 complete(wl->elp_compl);
654 wl->elp_compl = NULL;
657 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
658 /* don't enqueue a work right now. mark it as pending */
659 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
660 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
661 disable_irq_nosync(wl->irq);
662 pm_wakeup_event(wl->dev, 0);
663 spin_unlock_irqrestore(&wl->wl_lock, flags);
666 spin_unlock_irqrestore(&wl->wl_lock, flags);
668 /* TX might be handled here, avoid redundant work */
669 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
670 cancel_work_sync(&wl->tx_work);
672 mutex_lock(&wl->mutex);
674 ret = wlcore_irq_locked(wl);
/* Any failure while servicing the IRQ triggers full FW recovery. */
676 wl12xx_queue_recovery_work(wl);
678 spin_lock_irqsave(&wl->wl_lock, flags);
679 /* In case TX was not handled here, queue TX work */
680 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
681 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
682 wl1271_tx_total_queue_count(wl) > 0)
683 ieee80211_queue_work(wl->hw, &wl->tx_work);
684 spin_unlock_irqrestore(&wl->wl_lock, flags);
686 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces (see wl12xx_get_vif_count). */
691 struct vif_counter_data {
694 struct ieee80211_vif *cur_vif;
695 bool cur_vif_running;
/*
 * mac80211 interface iterator callback: counts interfaces and notes
 * whether the caller's "current" vif is among the active ones.
 */
698 static void wl12xx_vif_count_iter(void *data, u8 *mac,
699 struct ieee80211_vif *vif)
701 struct vif_counter_data *counter = data;
704 if (counter->cur_vif == vif)
705 counter->cur_vif_running = true;
708 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active interfaces via mac80211 iteration, recording whether
 * cur_vif is currently running. Result is returned through *data.
 */
709 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
710 struct ieee80211_vif *cur_vif,
711 struct vif_counter_data *data)
713 memset(data, 0, sizeof(*data));
714 data->cur_vif = cur_vif;
716 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
717 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role, or
 * single-role) into wl->fw. Skips the load if the right type is already
 * cached; copies the image into vmalloc'ed memory so the request can be
 * released immediately.
 */
720 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
722 const struct firmware *fw;
724 enum wl12xx_fw_type fw_type;
728 fw_type = WL12XX_FW_TYPE_PLT;
729 fw_name = wl->plt_fw_name;
732 * we can't call wl12xx_get_vif_count() here because
733 * wl->mutex is taken, so use the cached last_vif_count value
735 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
736 fw_type = WL12XX_FW_TYPE_MULTI;
737 fw_name = wl->mr_fw_name;
739 fw_type = WL12XX_FW_TYPE_NORMAL;
740 fw_name = wl->sr_fw_name;
/* Correct firmware already loaded — nothing to do. */
744 if (wl->fw_type == fw_type)
747 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
749 ret = request_firmware(&fw, fw_name, wl->dev);
752 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* FW images are downloaded in 32-bit words; size must align. */
757 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate the cached type until the new image is fully in place. */
764 wl->fw_type = WL12XX_FW_TYPE_NONE;
765 wl->fw_len = fw->size;
766 wl->fw = vmalloc(wl->fw_len);
769 wl1271_error("could not allocate memory for the firmware");
774 memcpy(wl->fw, fw->data, wl->fw_len);
776 wl->fw_type = fw_type;
778 release_firmware(fw);
/*
 * Kick off firmware recovery: move to RESTARTING state, wake the chip,
 * mask further interrupts and queue the recovery work. The state check
 * makes repeated calls (recursive recovery) a no-op.
 */
783 void wl12xx_queue_recovery_work(struct wl1271 *wl)
785 /* Avoid a recursive recovery */
786 if (wl->state == WLCORE_STATE_ON) {
/* Unintended recoveries are bugs worth a warning trace. */
787 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
790 wl->state = WLCORE_STATE_RESTARTING;
791 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
792 wl1271_ps_elp_wakeup(wl);
793 wlcore_disable_interrupts_nosync(wl);
794 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append a chunk of FW log data to the host-side log buffer (capped at
 * PAGE_SIZE total). Returns the number of bytes actually copied.
 */
798 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
802 /* Make sure we have enough room */
803 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
805 /* Fill the FW log file, consumed by the sysfs fwlog entry */
806 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
807 wl->fwlog_size += len;
/*
 * Dump the firmware's panic log to the host buffer after a crash.
 * Walks the FW's linked list of log memory blocks, temporarily switching
 * chip memory partitions so each block can be read, and restores the
 * original partition when done.
 */
812 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
814 struct wlcore_partition_set part, old_part;
/* Nothing to read if fwlog is unsupported or disabled. */
821 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
822 (wl->conf.fwlog.mem_blocks == 0))
825 wl1271_info("Reading FW panic log");
827 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
832 * Make sure the chip is awake and the logger isn't active.
833 * Do not send a stop fwlog command if the fw is hanged or if
834 * dbgpins are used (due to some fw bug).
836 if (wl1271_ps_elp_wakeup(wl))
838 if (!wl->watchdog_recovery &&
839 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 wl12xx_cmd_stop_fwlog(wl);
842 /* Read the first memory block address */
843 ret = wlcore_fw_status(wl, wl->fw_status);
847 addr = wl->fw_status->log_start_addr;
/* Continuous mode blocks carry an extra RX descriptor to skip. */
851 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
852 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
853 end_of_log = wl->fwlog_end;
855 offset = sizeof(addr);
/* Remember the current partition so it can be restored at the end. */
859 old_part = wl->curr_part;
860 memset(&part, 0, sizeof(part));
862 /* Traverse the memory blocks linked list */
864 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
865 part.mem.size = PAGE_SIZE;
867 ret = wlcore_set_partition(wl, &part);
869 wl1271_error("%s: set_partition start=0x%X size=%d",
870 __func__, part.mem.start, part.mem.size);
874 memset(block, 0, wl->fw_mem_block_size);
875 ret = wlcore_read_hwaddr(wl, addr, block,
876 wl->fw_mem_block_size, false);
882 * Memory blocks are linked to one another. The first 4 bytes
883 * of each memory block hold the hardware address of the next
884 * one. The last memory block points to the first one in
885 * on demand mode and is equal to 0x2000000 in continuous mode.
887 addr = le32_to_cpup((__le32 *)block);
/* Stop if the host buffer filled up before the log ended. */
889 if (!wl12xx_copy_fwlog(wl, block + offset,
890 wl->fw_mem_block_size - offset))
892 } while (addr && (addr != end_of_log));
/* Wake readers blocked on the sysfs fwlog entry. */
894 wake_up_interruptible(&wl->fwlog_waitq);
898 wlcore_set_partition(wl, &old_part);
/*
 * Preserve a station's freed-packet (TX sequence) counter across a FW
 * restart, padding it to cover packets transmitted but not yet reported
 * in FW status. GEM ciphers need a larger padding.
 */
901 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
902 u8 hlid, struct ieee80211_sta *sta)
904 struct wl1271_station *wl_sta;
905 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
907 wl_sta = (void *)sta->drv_priv;
908 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
911 * increment the initial seq number on recovery to account for
912 * transmitted packets that we haven't yet got in the FW status
914 if (wlvif->encryption_type == KEY_GEM)
915 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
917 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
918 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based variant of wlcore_save_freed_pkts(): looks up the
 * station by MAC address on the given vif before saving its counters.
 */
921 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
922 struct wl12xx_vif *wlvif,
923 u8 hlid, const u8 *addr)
925 struct ieee80211_sta *sta;
926 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
928 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
929 is_zero_ether_addr(addr)))
/* ieee80211_find_sta() requires RCU protection — presumably taken in
 * the lines omitted from this excerpt; confirm in the full file. */
933 sta = ieee80211_find_sta(vif, addr);
935 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, program counter at
 * crash time and raw interrupt status. Switches to the BOOT partition
 * to read the registers and restores the WORK partition afterwards.
 */
939 static void wlcore_print_recovery(struct wl1271 *wl)
945 wl1271_info("Hardware recovery in progress. FW ver: %s",
946 wl->chip.fw_ver_str);
948 /* change partitions momentarily so we can read the FW pc */
949 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
953 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
957 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
961 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
962 pc, hint_sts, ++wl->recovery_count);
964 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Firmware recovery work: dump the panic log and crash diagnostics,
 * preserve per-station TX sequence state, tear down every vif, stop the
 * chip and ask mac80211 to restart the hardware.
 */
968 static void wl1271_recovery_work(struct work_struct *work)
971 container_of(work, struct wl1271, recovery_work);
972 struct wl12xx_vif *wlvif;
973 struct ieee80211_vif *vif;
975 mutex_lock(&wl->mutex);
977 if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* Only dump diagnostics for unintended (real crash) recoveries. */
980 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
981 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
982 wl12xx_read_fwlog_panic(wl);
983 wlcore_print_recovery(wl);
/* Debug aid: crash the kernel on unintended recovery if configured. */
986 BUG_ON(wl->conf.recovery.bug_on_recovery &&
987 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
989 if (wl->conf.recovery.no_recovery) {
990 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
994 /* Prevent spurious TX during FW restart */
995 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997 /* reboot the chipset */
998 while (!list_empty(&wl->wlvif_list)) {
999 wlvif = list_first_entry(&wl->wlvif_list,
1000 struct wl12xx_vif, list);
1001 vif = wl12xx_wlvif_to_vif(wlvif);
/* Keep TX sequence numbers valid across the restart for STA links. */
1003 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1004 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1005 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1006 vif->bss_conf.bssid);
1009 __wl1271_op_remove_interface(wl, vif, false);
1012 wlcore_op_stop_locked(wl);
1014 ieee80211_restart_hw(wl->hw);
1017 * Its safe to enable TX now - the queues are stopped after a request
1018 * to restart the HW.
1020 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1023 wl->watchdog_recovery = false;
1024 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1025 mutex_unlock(&wl->mutex);
/* Wake the chip's ELP (extremely low power) state via the control register. */
1028 static int wlcore_fw_wakeup(struct wl1271 *wl)
1030 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device buffers needed before booting: raw and decoded
 * FW status plus the TX result interface. On failure the allocations
 * made so far are freed (error path partially omitted from this excerpt).
 */
1033 static int wl1271_setup(struct wl1271 *wl)
1035 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1036 if (!wl->raw_fw_status)
1039 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1043 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* Error path: release whatever was allocated above. */
1049 kfree(wl->fw_status);
1050 kfree(wl->raw_fw_status);
/*
 * Power the chip on, reset the I/O layer, select the BOOT partition and
 * wake the ELP module. Powers back off on failure.
 */
1054 static int wl12xx_set_power_on(struct wl1271 *wl)
1058 msleep(WL1271_PRE_POWER_ON_SLEEP);
1059 ret = wl1271_power_on(wl);
1062 msleep(WL1271_POWER_ON_SLEEP);
1063 wl1271_io_reset(wl);
1066 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1070 /* ELP module wake up */
1071 ret = wlcore_fw_wakeup(wl);
/* Error path: undo the power-on. */
1079 wl1271_power_off(wl);
/*
 * Bring the chip up to the point where firmware can be booted: power on,
 * configure the bus block size, allocate status buffers and fetch the
 * right firmware image (PLT or normal, per @plt).
 */
1083 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1087 ret = wl12xx_set_power_on(wl);
1092 * For wl127x based devices we could use the default block
1093 * size (512 bytes), but due to a bug in the sdio driver, we
1094 * need to set it explicitly after the chip is powered on. To
1095 * simplify the code and since the performance impact is
1096 * negligible, we use the same block size for all different
1099 * Check if the bus supports blocksize alignment and, if it
1100 * doesn't, make sure we don't have the quirk.
1102 if (!wl1271_set_block_size(wl))
1103 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1105 /* TODO: make sure the lower driver has set things up correctly */
1107 ret = wl1271_setup(wl);
1111 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line testing) mode. Requires the device to be
 * OFF; retries chip boot up to WL1271_BOOT_RETRIES times, then records
 * the FW version in the wiphy.
 */
1119 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1121 int retries = WL1271_BOOT_RETRIES;
1122 struct wiphy *wiphy = wl->hw->wiphy;
/* Human-readable names for the PLT modes, indexed by plt_mode. */
1124 static const char* const PLT_MODE[] = {
1133 mutex_lock(&wl->mutex);
1135 wl1271_notice("power up");
1137 if (wl->state != WLCORE_STATE_OFF) {
1138 wl1271_error("cannot go into PLT state because not "
1139 "in off state: %d", wl->state);
1144 /* Indicate to lower levels that we are now in PLT mode */
1146 wl->plt_mode = plt_mode;
1150 ret = wl12xx_chip_wakeup(wl, true);
/* PLT_CHIP_AWAKE skips FW init — chip stays awake without booting. */
1154 if (plt_mode != PLT_CHIP_AWAKE) {
1155 ret = wl->ops->plt_init(wl);
1160 wl->state = WLCORE_STATE_ON;
1161 wl1271_notice("firmware booted in PLT mode %s (%s)",
1163 wl->chip.fw_ver_str);
1165 /* update hw/fw version info in wiphy struct */
1166 wiphy->hw_version = wl->chip.id;
1167 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1168 sizeof(wiphy->fw_version));
/* Retry path: power off and try boot again, up to the retry limit. */
1173 wl1271_power_off(wl);
1177 wl->plt_mode = PLT_OFF;
1179 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1180 WL1271_BOOT_RETRIES);
1182 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts, flush and cancel all pending work,
 * power the chip off and return the driver to the OFF state.
 */
1187 int wl1271_plt_stop(struct wl1271 *wl)
1191 wl1271_notice("power down");
1194 * Interrupts must be disabled before setting the state to OFF.
1195 * Otherwise, the interrupt handler might be called and exit without
1196 * reading the interrupt status.
1198 wlcore_disable_interrupts(wl);
1199 mutex_lock(&wl->mutex);
1201 mutex_unlock(&wl->mutex);
1204 * This will not necessarily enable interrupts as interrupts
1205 * may have been disabled when op_stop was called. It will,
1206 * however, balance the above call to disable_interrupts().
1208 wlcore_enable_interrupts(wl);
1210 wl1271_error("cannot power down because not in PLT "
1211 "state: %d", wl->state);
1216 mutex_unlock(&wl->mutex);
/* Flush deferred frames and stop all work outside wl->mutex. */
1218 wl1271_flush_deferred_work(wl);
1219 cancel_work_sync(&wl->netstack_work);
1220 cancel_work_sync(&wl->recovery_work);
1221 cancel_delayed_work_sync(&wl->elp_work);
1222 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1224 mutex_lock(&wl->mutex);
1225 wl1271_power_off(wl);
1227 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1228 wl->state = WLCORE_STATE_OFF;
1230 wl->plt_mode = PLT_OFF;
1232 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point. Maps the frame to a link (hlid) and AC queue,
 * enqueues it on the per-link queue, applies the high-watermark
 * flow-control and kicks the TX work if the FW can accept data.
 * Frames with no vif, an invalid link, or a hard-stopped queue are dropped.
 */
1238 static void wl1271_op_tx(struct ieee80211_hw *hw,
1239 struct ieee80211_tx_control *control,
1240 struct sk_buff *skb)
1242 struct wl1271 *wl = hw->priv;
1243 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1244 struct ieee80211_vif *vif = info->control.vif;
1245 struct wl12xx_vif *wlvif = NULL;
1246 unsigned long flags;
1251 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1252 ieee80211_free_txskb(hw, skb);
1256 wlvif = wl12xx_vif_to_data(vif);
1257 mapping = skb_get_queue_mapping(skb);
1258 q = wl1271_tx_get_queue(mapping);
1260 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1262 spin_lock_irqsave(&wl->wl_lock, flags);
1265 * drop the packet if the link is invalid or the queue is stopped
1266 * for any reason but watermark. Watermark is a "soft"-stop so we
1267 * allow these packets through.
1269 if (hlid == WL12XX_INVALID_LINK_ID ||
1270 (!test_bit(hlid, wlvif->links_map)) ||
1271 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1272 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1273 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1274 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1275 ieee80211_free_txskb(hw, skb);
1279 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1281 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1283 wl->tx_queue_count[q]++;
1284 wlvif->tx_queue_count[q]++;
1287 * The workqueue is slow to process the tx_queue and we need stop
1288 * the queue here, otherwise the queue will get too long.
1290 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1291 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1292 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1293 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1294 wlcore_stop_queue_locked(wl, wlvif, q,
1295 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1299 * The chip specific setup must run before the first TX packet -
1300 * before that, the tx_work will not be initialized!
/* Only queue TX work if the FW can accept data and the IRQ path is not
 * already handling TX (WL1271_FLAG_TX_PENDING). */
1303 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1304 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1305 ieee80211_queue_work(wl->hw, &wl->tx_work);
1308 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet, which the FW requests when it is
 * low on RX memory blocks. Sent immediately when the FW TX path is free;
 * otherwise left for the threaded IRQ handler to pick up.
 */
1311 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1313 unsigned long flags;
1316 /* no need to queue a new dummy packet if one is already pending */
1317 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1320 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1322 spin_lock_irqsave(&wl->wl_lock, flags);
1323 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1324 wl->tx_queue_count[q]++;
1325 spin_unlock_irqrestore(&wl->wl_lock, flags);
1327 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1328 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1329 return wlcore_tx_work_locked(wl);
1332 * If the FW TX is busy, TX work will be scheduled by the threaded
1333 * interrupt handler function
1339 * The size of the dummy packet should be at least 1400 bytes. However, in
1340 * order to minimize the number of bus transactions, aligning it to 512 bytes
1341 * boundaries could be beneficial, performance wise
1343 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed null-function data frame
 * padded to TOTAL_TX_DUMMY_PACKET_SIZE, with room reserved for the TX
 * HW descriptor. Returns NULL on allocation failure.
 */
1345 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1347 struct sk_buff *skb;
1348 struct ieee80211_hdr_3addr *hdr;
1349 unsigned int dummy_packet_size;
/* Payload size = total - HW descriptor - 802.11 header. */
1351 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1352 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1354 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1356 wl1271_warning("Failed to allocate a dummy packet skb");
/* Leave headroom for the TX HW descriptor prepended later. */
1360 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1362 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1363 memset(hdr, 0, sizeof(*hdr));
1364 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1365 IEEE80211_STYPE_NULLFUNC |
1366 IEEE80211_FCTL_TODS);
1368 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1370 /* Dummy packets require the TID to be management */
1371 skb->priority = WL1271_TID_MGMT;
1373 /* Initialize all fields that might be used */
1374 skb_set_queue_mapping(skb, 0);
1375 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate one cfg80211 WoWLAN packet pattern against FW filter limits:
 * the number of contiguous masked-byte segments ("fields") must not exceed
 * WL1271_RX_FILTER_MAX_FIELDS, and their serialized size must fit in
 * WL1271_RX_FILTER_MAX_FIELDS_SIZE. A pattern with no mask is rejected.
 */
1383 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1385 int num_fields = 0, in_field = 0, fields_size = 0;
1386 int i, pattern_len = 0;
1389 wl1271_warning("No mask in WoWLAN pattern");
1394 * The pattern is broken up into segments of bytes at different offsets
1395 * that need to be checked by the FW filter. Each segment is called
1396 * a field in the FW API. We verify that the total number of fields
1397 * required for this pattern won't exceed FW limits (8)
1398 * as well as the total fields buffer won't exceed the FW limit.
1399 * Note that if there's a pattern which crosses Ethernet/IP header
1400 * boundary a new field is required.
1402 for (i = 0; i < p->pattern_len; i++) {
1403 if (test_bit(i, (unsigned long *)p->mask)) {
/* a field crossing the Ethernet header boundary is split in two */
1408 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1410 fields_size += pattern_len +
1411 RX_FILTER_FIELD_OVERHEAD;
1419 fields_size += pattern_len +
1420 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1427 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1431 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1432 wl1271_warning("RX Filter too complex. Too many segments");
1436 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1437 wl1271_warning("RX filter pattern is too big");
/* Allocate a zero-initialized RX filter; caller frees with wl1271_rx_filter_free(). */
1444 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1446 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Free an RX filter: release each field's allocated pattern buffer,
 * then (presumably, in elided lines) the filter itself.
 */
1449 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1456 for (i = 0; i < filter->num_fields; i++)
1457 kfree(filter->fields[i].pattern);
/*
 * Append a field to an RX filter: offset (stored little-endian), flags and
 * a copied pattern of @len bytes. Fails when the filter already holds
 * WL1271_RX_FILTER_MAX_FIELDS fields or the pattern allocation fails
 * (error return values are elided in this listing).
 */
1462 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1463 u16 offset, u8 flags,
1464 const u8 *pattern, u8 len)
1466 struct wl12xx_rx_filter_field *field;
1468 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1469 wl1271_warning("Max fields per RX filter. can't alloc another");
1473 field = &filter->fields[filter->num_fields];
1475 field->pattern = kzalloc(len, GFP_KERNEL);
1476 if (!field->pattern) {
1477 wl1271_warning("Failed to allocate RX filter pattern");
/* commit the slot only after the pattern buffer is allocated */
1481 filter->num_fields++;
1483 field->offset = cpu_to_le16(offset);
1484 field->flags = flags;
1486 memcpy(field->pattern, pattern, len);
/*
 * Total serialized size of all fields of a filter, as flattened by
 * wl1271_rx_filter_flatten_fields(): per field, the struct minus the
 * pattern-pointer member plus the actual pattern length.
 */
1491 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1493 int i, fields_size = 0;
1495 for (i = 0; i < filter->num_fields; i++)
1496 fields_size += filter->fields[i].len +
1497 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize a filter's fields into @buf for the FW: each field is written
 * as its struct header (without the pattern pointer) immediately followed
 * by the inline pattern bytes. Caller sizes @buf via
 * wl1271_rx_filter_get_fields_size().
 */
1503 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1507 struct wl12xx_rx_filter_field *field;
1509 for (i = 0; i < filter->num_fields; i++) {
1510 field = (struct wl12xx_rx_filter_field *)buf;
1512 field->offset = filter->fields[i].offset;
1513 field->flags = filter->fields[i].flags;
1514 field->len = filter->fields[i].len;
/* copy the pattern inline, replacing the pointer member in the wire form */
1516 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1517 buf += sizeof(struct wl12xx_rx_filter_field) -
1518 sizeof(u8 *) + field->len;
1523 * Allocates an RX filter returned through f
1524 * which needs to be freed using rx_filter_free()
/*
 * Convert a (pre-validated) cfg80211 WoWLAN pattern into a FW RX filter:
 * walk the mask, turn each run of masked bytes into a filter field with
 * Ethernet- or IP-header-relative offset (splitting runs that cross the
 * Ethernet header boundary), and set the filter action to FILTER_SIGNAL.
 * On field-allocation failure the partially built filter is freed.
 */
1527 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1528 struct wl12xx_rx_filter **f)
1531 struct wl12xx_rx_filter *filter;
1535 filter = wl1271_rx_filter_alloc();
1537 wl1271_warning("Failed to alloc rx filter");
1543 while (i < p->pattern_len) {
1544 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current run of masked bytes */
1549 for (j = i; j < p->pattern_len; j++) {
1550 if (!test_bit(j, (unsigned long *)p->mask))
1553 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1554 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* offset is relative to the Ethernet or IP header, per segment start */
1558 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1560 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1562 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1563 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1568 ret = wl1271_rx_filter_alloc_field(filter,
1571 &p->pattern[i], len);
1578 filter->action = FILTER_SIGNAL;
1584 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from a cfg80211 WoWLAN configuration.
 * With no usable patterns (NULL wow, wow->any, or zero patterns) the
 * default filter is restored and all FW filters are cleared. Otherwise:
 * validate every pattern first, clear current FW state, install one
 * enabled filter per pattern, and finally set the default action to
 * FILTER_DROP so only matching packets wake the host.
 */
1590 static int wl1271_configure_wowlan(struct wl1271 *wl,
1591 struct cfg80211_wowlan *wow)
1595 if (!wow || wow->any || !wow->n_patterns) {
1596 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1601 ret = wl1271_rx_filter_clear_all(wl);
1608 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1611 /* Validate all incoming patterns before clearing current FW state */
1612 for (i = 0; i < wow->n_patterns; i++) {
1613 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1615 wl1271_warning("Bad wowlan pattern %d", i);
1620 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1624 ret = wl1271_rx_filter_clear_all(wl);
1628 /* Translate WoWLAN patterns into filters */
1629 for (i = 0; i < wow->n_patterns; i++) {
1630 struct cfg80211_pkt_pattern *p;
1631 struct wl12xx_rx_filter *filter = NULL;
1633 p = &wow->patterns[i];
1635 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1637 wl1271_warning("Failed to create an RX filter from "
1638 "wowlan pattern %d", i);
/* the FW keeps its own copy; free ours whether enable succeeded or not */
1642 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1644 wl1271_rx_filter_free(filter);
1649 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA vif for suspend: wake the chip from ELP,
 * program WoWLAN filters, and — only when the suspend-specific wake-up
 * event or listen interval differs from the normal one — reprogram the
 * FW wake-up conditions. A non-associated STA needs no preparation.
 */
1655 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
1661 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1664 ret = wl1271_ps_elp_wakeup(wl);
1668 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend settings match the active ones */
1672 if ((wl->conf.conn.suspend_wake_up_event ==
1673 wl->conf.conn.wake_up_event) &&
1674 (wl->conf.conn.suspend_listen_interval ==
1675 wl->conf.conn.listen_interval))
1678 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1679 wl->conf.conn.suspend_wake_up_event,
1680 wl->conf.conn.suspend_listen_interval);
1683 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1686 wl1271_ps_elp_sleep(wl);
/*
 * Prepare a started AP vif for suspend: wake the chip and enable beacon
 * filtering so beacons don't keep the host awake. No-op for an AP that
 * has not been started.
 */
1692 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1693 struct wl12xx_vif *wlvif)
1697 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1700 ret = wl1271_ps_elp_wakeup(wl);
1704 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1706 wl1271_ps_elp_sleep(wl);
/*
 * Dispatch suspend preparation by BSS type: STA vifs get the WoWLAN/
 * wake-condition setup, AP vifs get beacon filtering; other types
 * (fall-through return is elided in this listing) need nothing.
 */
1712 static int wl1271_configure_suspend(struct wl1271 *wl,
1713 struct wl12xx_vif *wlvif,
1714 struct cfg80211_wowlan *wow)
1716 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1717 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1718 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1719 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo the suspend-time configuration on resume: for an associated STA,
 * tear down WoWLAN filters and restore the normal wake-up conditions
 * (skipped when they match the suspend values); for an AP, disable
 * beacon filtering again. Other vif types are ignored.
 */
1723 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1726 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1727 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1729 if ((!is_ap) && (!is_sta))
1732 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1735 ret = wl1271_ps_elp_wakeup(wl);
/* NULL wow clears the FW RX filters and restores the default action */
1740 wl1271_configure_wowlan(wl, NULL);
1742 if ((wl->conf.conn.suspend_wake_up_event ==
1743 wl->conf.conn.wake_up_event) &&
1744 (wl->conf.conn.suspend_listen_interval ==
1745 wl->conf.conn.listen_interval))
1748 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1749 wl->conf.conn.wake_up_event,
1750 wl->conf.conn.listen_interval);
1753 wl1271_error("resume: wake up conditions failed: %d",
1757 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1761 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback. Refuses to suspend while a recovery is in
 * progress (recovery must run first), flushes TX, configures each vif
 * for suspend under the mutex, then quiesces the driver: toggle
 * interrupts to flush the threaded IRQ, set WL1271_FLAG_SUSPENDED so no
 * new IRQ work is queued, flush TX/ELP works and cancel the TX watchdog.
 */
1764 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1765 struct cfg80211_wowlan *wow)
1767 struct wl1271 *wl = hw->priv;
1768 struct wl12xx_vif *wlvif;
1771 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1774 /* we want to perform the recovery before suspending */
1775 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1776 wl1271_warning("postponing suspend to perform recovery");
1780 wl1271_tx_flush(wl);
1782 mutex_lock(&wl->mutex);
1783 wl->wow_enabled = true;
1784 wl12xx_for_each_wlvif(wl, wlvif) {
1785 ret = wl1271_configure_suspend(wl, wlvif, wow);
1787 mutex_unlock(&wl->mutex);
1788 wl1271_warning("couldn't prepare device to suspend");
1792 mutex_unlock(&wl->mutex);
1793 /* flush any remaining work */
1794 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1797 * disable and re-enable interrupts in order to flush
1800 wlcore_disable_interrupts(wl);
1803 * set suspended flag to avoid triggering a new threaded_irq
1804 * work. no need for spinlock as interrupts are disabled.
1806 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1808 wlcore_enable_interrupts(wl);
1809 flush_work(&wl->tx_work);
1810 flush_delayed_work(&wl->elp_work);
1813 * Cancel the watchdog even if above tx_flush failed. We will detect
1814 * it on resume anyway.
1816 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback, mirror of wl1271_op_suspend. Clears the
 * SUSPENDED flag under wl_lock and notes whether IRQ work was postponed;
 * under the mutex it runs that postponed IRQ work directly (skipping HW
 * access if a recovery is pending, re-queueing the recovery instead),
 * restores each vif's resume configuration, clears wow_enabled, and arms
 * the re-init flag for the TX watchdog.
 */
1821 static int wl1271_op_resume(struct ieee80211_hw *hw)
1823 struct wl1271 *wl = hw->priv;
1824 struct wl12xx_vif *wlvif;
1825 unsigned long flags;
1826 bool run_irq_work = false, pending_recovery;
1829 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1831 WARN_ON(!wl->wow_enabled);
1834 * re-enable irq_work enqueuing, and call irq_work directly if
1835 * there is a pending work.
1837 spin_lock_irqsave(&wl->wl_lock, flags);
1838 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1839 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1840 run_irq_work = true;
1841 spin_unlock_irqrestore(&wl->wl_lock, flags);
1843 mutex_lock(&wl->mutex);
1845 /* test the recovery flag before calling any SDIO functions */
1846 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1850 wl1271_debug(DEBUG_MAC80211,
1851 "run postponed irq_work directly");
1853 /* don't talk to the HW if recovery is pending */
1854 if (!pending_recovery) {
1855 ret = wlcore_irq_locked(wl);
1857 wl12xx_queue_recovery_work(wl);
1860 wlcore_enable_interrupts(wl);
1863 if (pending_recovery) {
1864 wl1271_warning("queuing forgotten recovery on resume");
1865 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1869 wl12xx_for_each_wlvif(wl, wlvif) {
1870 wl1271_configure_resume(wl, wlvif);
1874 wl->wow_enabled = false;
1877 * Set a flag to re-init the watchdog on the first Tx after resume.
1878 * That way we avoid possible conditions where Tx-complete interrupts
1879 * fail to arrive and we perform a spurious recovery.
1881 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1882 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback. Intentionally does not boot the chip:
 * firmware download needs the MAC address, which is only known when the
 * first interface is added, so real initialization is deferred to
 * wl1271_op_add_interface().
 */
1888 static int wl1271_op_start(struct ieee80211_hw *hw)
1890 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1893 * We have to delay the booting of the hardware because
1894 * we need to know the local MAC address before downloading and
1895 * initializing the firmware. The MAC address cannot be changed
1896 * after boot, and without the proper MAC address, the firmware
1897 * will not function properly.
1899 * The MAC address is first known when the corresponding interface
1900 * is added. That is where we will initialize the hardware.
/*
 * Core stop path, entered with wl->mutex held. Sets WLCORE_STATE_OFF
 * first so no work function does anything further, disables interrupts
 * with the nosync variant (safe under the mutex), then drops the mutex
 * to synchronize/cancel all works, powers the chip off, rebalances the
 * interrupt-disable depth left by a scheduled recovery, and resets all
 * per-device state (queues, maps, fw_status buffers, Reg-Domain config).
 */
1906 static void wlcore_op_stop_locked(struct wl1271 *wl)
1910 if (wl->state == WLCORE_STATE_OFF) {
/* already off: just rebalance interrupts disabled by a pending recovery */
1911 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1913 wlcore_enable_interrupts(wl);
1919 * this must be before the cancel_work calls below, so that the work
1920 * functions don't perform further work.
1922 wl->state = WLCORE_STATE_OFF;
1925 * Use the nosync variant to disable interrupts, so the mutex could be
1926 * held while doing so without deadlocking.
1928 wlcore_disable_interrupts_nosync(wl);
1930 mutex_unlock(&wl->mutex);
1932 wlcore_synchronize_interrupts(wl);
1933 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1934 cancel_work_sync(&wl->recovery_work);
1935 wl1271_flush_deferred_work(wl);
1936 cancel_delayed_work_sync(&wl->scan_complete_work);
1937 cancel_work_sync(&wl->netstack_work);
1938 cancel_work_sync(&wl->tx_work);
1939 cancel_delayed_work_sync(&wl->elp_work);
1940 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1942 /* let's notify MAC80211 about the remaining pending TX frames */
1943 mutex_lock(&wl->mutex);
1944 wl12xx_tx_reset(wl);
1946 wl1271_power_off(wl);
1948 * In case a recovery was scheduled, interrupts were disabled to avoid
1949 * an interrupt storm. Now that the power is down, it is safe to
1950 * re-enable interrupts to balance the disable depth
1952 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1953 wlcore_enable_interrupts(wl);
/* reset per-device state to its post-probe defaults */
1955 wl->band = IEEE80211_BAND_2GHZ;
1958 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1959 wl->channel_type = NL80211_CHAN_NO_HT;
1960 wl->tx_blocks_available = 0;
1961 wl->tx_allocated_blocks = 0;
1962 wl->tx_results_count = 0;
1963 wl->tx_packets_count = 0;
1964 wl->time_offset = 0;
1965 wl->ap_fw_ps_map = 0;
1967 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1968 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1969 memset(wl->links_map, 0, sizeof(wl->links_map));
1970 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1971 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1972 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1973 wl->active_sta_count = 0;
1974 wl->active_link_count = 0;
1976 /* The system link is always allocated */
1977 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1978 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1979 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1982 * this is performed after the cancel_work calls and the associated
1983 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1984 * get executed before all these vars have been reset.
1988 wl->tx_blocks_freed = 0;
1990 for (i = 0; i < NUM_TX_QUEUES; i++) {
1991 wl->tx_pkts_freed[i] = 0;
1992 wl->tx_allocated_pkts[i] = 0;
1995 wl1271_debugfs_reset(wl);
/* free FW status / TX-result buffers; reallocated on next boot */
1997 kfree(wl->raw_fw_status);
1998 wl->raw_fw_status = NULL;
1999 kfree(wl->fw_status);
2000 wl->fw_status = NULL;
2001 kfree(wl->tx_res_if);
2002 wl->tx_res_if = NULL;
2003 kfree(wl->target_mem_map);
2004 wl->target_mem_map = NULL;
2007 * FW channels must be re-calibrated after recovery,
2008 * save current Reg-Domain channel configuration and clear it.
2010 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2011 sizeof(wl->reg_ch_conf_pending));
2012 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the mutex and run the locked stop path. */
2015 static void wlcore_op_stop(struct ieee80211_hw *hw)
2017 struct wl1271 *wl = hw->priv;
2019 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2021 mutex_lock(&wl->mutex);
2023 wlcore_op_stop_locked(wl);
2025 mutex_unlock(&wl->mutex);
/*
 * Delayed-work timeout handler for a channel switch that never
 * completed: if CS is still in progress, report failure to mac80211 via
 * ieee80211_chswitch_done(vif, false) and tell the FW to stop the
 * channel switch (chip woken from ELP around the command).
 */
2028 static void wlcore_channel_switch_work(struct work_struct *work)
2030 struct delayed_work *dwork;
2032 struct ieee80211_vif *vif;
2033 struct wl12xx_vif *wlvif;
2036 dwork = container_of(work, struct delayed_work, work);
2037 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2040 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2042 mutex_lock(&wl->mutex);
2044 if (unlikely(wl->state != WLCORE_STATE_ON))
2047 /* check the channel switch is still ongoing */
2048 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2051 vif = wl12xx_wlvif_to_vif(wlvif);
2052 ieee80211_chswitch_done(vif, false);
2054 ret = wl1271_ps_elp_wakeup(wl);
2058 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2060 wl1271_ps_elp_sleep(wl);
2062 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler that finalizes a connection loss: if the device
 * is still on and the STA is still flagged as associated, notify
 * mac80211 via ieee80211_connection_loss().
 */
2065 static void wlcore_connection_loss_work(struct work_struct *work)
2067 struct delayed_work *dwork;
2069 struct ieee80211_vif *vif;
2070 struct wl12xx_vif *wlvif;
2072 dwork = container_of(work, struct delayed_work, work);
2073 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2076 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2078 mutex_lock(&wl->mutex);
2080 if (unlikely(wl->state != WLCORE_STATE_ON))
2083 /* Call mac80211 connection loss */
2084 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2087 vif = wl12xx_wlvif_to_vif(wlvif);
2088 ieee80211_connection_loss(vif);
2090 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler that cancels the pending-auth ROC once the auth
 * timeout elapses. Re-checks that (almost) the full timeout has really
 * passed since the last auth reply — a newer reply may have arrived
 * while this work waited on the mutex — before waking the chip and
 * cancelling the ROC via wlcore_update_inconn_sta().
 */
2093 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2095 struct delayed_work *dwork;
2097 struct wl12xx_vif *wlvif;
2098 unsigned long time_spare;
2101 dwork = container_of(work, struct delayed_work, work);
2102 wlvif = container_of(dwork, struct wl12xx_vif,
2103 pending_auth_complete_work);
2106 mutex_lock(&wl->mutex);
2108 if (unlikely(wl->state != WLCORE_STATE_ON))
2112 * Make sure a second really passed since the last auth reply. Maybe
2113 * a second auth reply arrived while we were stuck on the mutex.
2114 * Check for a little less than the timeout to protect from scheduler
2117 time_spare = jiffies +
2118 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2119 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2122 ret = wl1271_ps_elp_wakeup(wl);
2126 /* cancel the ROC if active */
2127 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2129 wl1271_ps_elp_sleep(wl);
2131 mutex_unlock(&wl->mutex);
/*
 * Reserve a free slot in the device's rate-policy bitmap and return it
 * through *idx (the store into *idx is elided in this listing — TODO
 * confirm against full source). Fails when all policies are in use.
 */
2134 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2136 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2137 WL12XX_MAX_RATE_POLICIES);
2138 if (policy >= WL12XX_MAX_RATE_POLICIES)
2141 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and poison *idx with the out-of-range
 * sentinel WL12XX_MAX_RATE_POLICIES so double-free is caught by WARN_ON.
 */
2146 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2148 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2151 __clear_bit(*idx, wl->rate_policies_map);
2152 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Reserve a free slot in the keep-alive (KLV) template bitmap; same
 * pattern as wl12xx_allocate_rate_policy() (the store into *idx is
 * elided in this listing — TODO confirm against full source).
 */
2155 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2157 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2158 WLCORE_MAX_KLV_TEMPLATES);
2159 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2162 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a KLV template slot and poison *idx with the out-of-range
 * sentinel so a stale index trips the WARN_ON on reuse.
 */
2167 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2169 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2172 __clear_bit(*idx, wl->klv_templates_map);
2173 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (plus, per the paired GO/CL returns, what appears
 * to be a P2P distinction on elided lines) to the FW role type.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
2176 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2178 switch (wlvif->bss_type) {
2179 case BSS_TYPE_AP_BSS:
2181 return WL1271_ROLE_P2P_GO;
2183 return WL1271_ROLE_AP;
2185 case BSS_TYPE_STA_BSS:
2187 return WL1271_ROLE_P2P_CL;
2189 return WL1271_ROLE_STA;
2192 return WL1271_ROLE_IBSS;
2195 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2197 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state when an interface is added: clear the
 * non-persistent part of the wl12xx_vif, derive bss_type from the
 * mac80211 interface type, mark role/link ids invalid, allocate rate
 * policies and (for STA/IBSS) a keep-alive template, seed rate masks,
 * copy the globally-configured band/channel/power/channel-type from wl,
 * and set up the vif's works, delayed works, list head and RX-streaming
 * timer.
 */
2200 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2202 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2205 /* clear everything but the persistent data */
2206 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2208 switch (ieee80211_vif_type_p2p(vif)) {
2209 case NL80211_IFTYPE_P2P_CLIENT:
2212 case NL80211_IFTYPE_STATION:
2213 wlvif->bss_type = BSS_TYPE_STA_BSS;
2215 case NL80211_IFTYPE_ADHOC:
2216 wlvif->bss_type = BSS_TYPE_IBSS;
2218 case NL80211_IFTYPE_P2P_GO:
2221 case NL80211_IFTYPE_AP:
2222 wlvif->bss_type = BSS_TYPE_AP_BSS;
2225 wlvif->bss_type = MAX_BSS_TYPE;
/* no FW role/link assigned yet */
2229 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2230 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2231 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2233 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2234 wlvif->bss_type == BSS_TYPE_IBSS) {
2235 /* init sta/ibss data */
2236 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2237 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2238 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2239 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2240 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2241 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2242 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2243 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP branch: broadcast/global links and per-AC unicast rate policies */
2246 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2247 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2248 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2249 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2250 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2251 wl12xx_allocate_rate_policy(wl,
2252 &wlvif->ap.ucast_rate_idx[i]);
2253 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2255 * TODO: check if basic_rate shouldn't be
2256 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2257 * instead (the same thing for STA above).
2259 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2260 /* TODO: this seems to be used only for STA, check it */
2261 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2264 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2265 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2266 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2269 * mac80211 configures some values globally, while we treat them
2270 * per-interface. thus, on init, we have to copy them from wl
2272 wlvif->band = wl->band;
2273 wlvif->channel = wl->channel;
2274 wlvif->power_level = wl->power_level;
2275 wlvif->channel_type = wl->channel_type;
2277 INIT_WORK(&wlvif->rx_streaming_enable_work,
2278 wl1271_rx_streaming_enable_work);
2279 INIT_WORK(&wlvif->rx_streaming_disable_work,
2280 wl1271_rx_streaming_disable_work);
2281 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2282 wlcore_channel_switch_work);
2283 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2284 wlcore_connection_loss_work);
2285 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2286 wlcore_pending_auth_complete_work);
2287 INIT_LIST_HEAD(&wlvif->list);
2289 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2290 (unsigned long) wlvif);
/*
 * Boot the firmware with up to WL1271_BOOT_RETRIES attempts: wake the
 * chip, run the chip-specific boot op and HW init; on failure, drop the
 * mutex to flush pending IRQ work (safe while WLCORE_STATE_OFF), power
 * off and retry. On success publish the hw/fw version to wiphy, disable
 * 5 GHz channels when 11a is unsupported per the NVS, and set the state
 * to WLCORE_STATE_ON.
 */
2294 static int wl12xx_init_fw(struct wl1271 *wl)
2296 int retries = WL1271_BOOT_RETRIES;
2297 bool booted = false;
2298 struct wiphy *wiphy = wl->hw->wiphy;
2303 ret = wl12xx_chip_wakeup(wl, false);
2307 ret = wl->ops->boot(wl);
2311 ret = wl1271_hw_init(wl);
2319 mutex_unlock(&wl->mutex);
2320 /* Unlocking the mutex in the middle of handling is
2321 inherently unsafe. In this case we deem it safe to do,
2322 because we need to let any possibly pending IRQ out of
2323 the system (and while we are WLCORE_STATE_OFF the IRQ
2324 work function will not do anything.) Also, any other
2325 possible concurrent operations will fail due to the
2326 current state, hence the wl1271 struct should be safe. */
2327 wlcore_disable_interrupts(wl);
2328 wl1271_flush_deferred_work(wl);
2329 cancel_work_sync(&wl->netstack_work);
2330 mutex_lock(&wl->mutex);
2332 wl1271_power_off(wl);
2336 wl1271_error("firmware boot failed despite %d retries",
2337 WL1271_BOOT_RETRIES);
2341 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2343 /* update hw/fw version info in wiphy struct */
2344 wiphy->hw_version = wl->chip.id;
2345 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2346 sizeof(wiphy->fw_version));
2349 * Now we know if 11a is supported (info from the NVS), so disable
2350 * 11a channels if not supported
2352 if (!wl->enable_11a)
2353 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2355 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2356 wl->enable_11a ? "" : "not ");
2358 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid (started) device link id. */
2363 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2365 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2369 * Check whether a fw switch (i.e. moving from one loaded
2370 * fw to another) is needed. This function is also responsible
2371 * for updating wl->last_vif_count, so it must be called before
2372 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Decide whether the single-role <-> multi-role firmware must be
 * swapped for the new vif count. Never during a vif change already in
 * progress, never while the device is off, and never when only one fw
 * image exists. Side effect: records the vif count in wl->last_vif_count.
 */
2375 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2376 struct vif_counter_data vif_counter_data,
2379 enum wl12xx_fw_type current_fw = wl->fw_type;
2380 u8 vif_count = vif_counter_data.counter;
2382 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2385 /* increase the vif count if this is a new vif */
2386 if (add && !vif_counter_data.cur_vif_running)
2389 wl->last_vif_count = vif_count;
2391 /* no need for fw change if the device is OFF */
2392 if (wl->state == WLCORE_STATE_OFF)
2395 /* no need for fw change if a single fw is used */
2396 if (!wl->mr_fw_name)
/* switch when the loaded image doesn't match the role count */
2399 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2401 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2408 * Enter "forced psm". Make sure the sta is in psm against the ap,
2409 * to make the fw switch a bit more disconnection-persistent.
/* Force power-save mode on every STA vif before a firmware switch. */
2411 static void wl12xx_force_active_psm(struct wl1271 *wl)
2413 struct wl12xx_vif *wlvif;
2415 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2416 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Iteration state for wlcore_hw_queue_iter(): a bitmap of hw-queue-base
 * slots already taken by active interfaces, the vif being added, and a
 * flag recording whether that vif is itself among the iterated ones.
 */
2420 struct wlcore_hw_queue_iter_data {
2421 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2423 struct ieee80211_vif *vif;
2424 /* is the current vif among those iterated */
/*
 * Per-interface callback for the active-interface iteration: mark the
 * hw-queue-base slot used by each vif, except the vif currently being
 * added (which is noted via cur_running instead of claiming a slot).
 */
2428 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2429 struct ieee80211_vif *vif)
2431 struct wlcore_hw_queue_iter_data *iter_data = data;
2433 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2436 if (iter_data->cur_running || vif == iter_data->vif) {
2437 iter_data->cur_running = true;
2441 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to a vif. If the
 * vif is already running in mac80211 (resume/recovery) its existing
 * base is reused; otherwise the first free base slot is claimed, the
 * per-queue stop reasons are cleared and vif->hw_queue[] registered.
 * AP vifs additionally get a CAB (content-after-beacon) queue from the
 * reserved tail range; others get IEEE80211_INVAL_HW_QUEUE.
 */
2444 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2445 struct wl12xx_vif *wlvif)
2447 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2448 struct wlcore_hw_queue_iter_data iter_data = {};
2451 iter_data.vif = vif;
2453 /* mark all bits taken by active interfaces */
2454 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2455 IEEE80211_IFACE_ITER_RESUME_ALL,
2456 wlcore_hw_queue_iter, &iter_data);
2458 /* the current vif is already running in mac80211 (resume/recovery) */
2459 if (iter_data.cur_running) {
2460 wlvif->hw_queue_base = vif->hw_queue[0];
2461 wl1271_debug(DEBUG_MAC80211,
2462 "using pre-allocated hw queue base %d",
2463 wlvif->hw_queue_base);
2465 /* interface type might have changed type */
2466 goto adjust_cab_queue;
2469 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2470 WLCORE_NUM_MAC_ADDRESSES);
2471 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2474 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2475 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2476 wlvif->hw_queue_base);
2478 for (i = 0; i < NUM_TX_QUEUES; i++) {
2479 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2480 /* register hw queues in mac80211 */
2481 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2485 /* the last places are reserved for cab queues per interface */
2486 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2487 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2488 wlvif->hw_queue_base / NUM_TX_QUEUES;
2490 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback. Rejects while in PLT mode, advertises
 * beacon-filter/CQM-RSSI support on the vif, then under the mutex:
 * initializes the per-vif data, resolves the FW role type, allocates hw
 * queues, triggers a firmware switch (via recovery) when the role count
 * requires it, boots the FW on first interface (MAC address copied from
 * the vif beforehand — it cannot change after boot), enables the role
 * and runs vif-specific init, and finally links the vif into wlvif_list.
 */
2495 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2496 struct ieee80211_vif *vif)
2498 struct wl1271 *wl = hw->priv;
2499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2500 struct vif_counter_data vif_count;
2505 wl1271_error("Adding Interface not allowed while in PLT mode");
2509 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2510 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2512 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2513 ieee80211_vif_type_p2p(vif), vif->addr);
2515 wl12xx_get_vif_count(hw, vif, &vif_count);
2517 mutex_lock(&wl->mutex);
2518 ret = wl1271_ps_elp_wakeup(wl);
2523 * in some very corner case HW recovery scenarios its possible to
2524 * get here before __wl1271_op_remove_interface is complete, so
2525 * opt out if that is the case.
2527 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2528 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2534 ret = wl12xx_init_vif_data(wl, vif);
2539 role_type = wl12xx_get_role_type(wl, wlvif);
2540 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2545 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* fw switch needed: force PSM and run recovery synchronously, unlocked */
2549 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2550 wl12xx_force_active_psm(wl);
2551 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2552 mutex_unlock(&wl->mutex);
2553 wl1271_recovery_work(&wl->recovery_work);
2558 * TODO: after the nvs issue will be solved, move this block
2559 * to start(), and make sure here the driver is ON.
2561 if (wl->state == WLCORE_STATE_OFF) {
2563 * we still need this in order to configure the fw
2564 * while uploading the nvs
2566 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2568 ret = wl12xx_init_fw(wl);
2573 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2574 role_type, &wlvif->role_id);
2578 ret = wl1271_init_vif_specific(wl, vif);
2582 list_add(&wlvif->list, &wl->wlvif_list);
2583 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2585 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2590 wl1271_ps_elp_sleep(wl);
2592 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif (mutex held by caller). Idles any scan the vif owns
 * (rearming the TX watchdog first so a just-finished scan doesn't trip
 * it), detaches the vif from sched-scan/ROC bookkeeping, disables the
 * FW role when no recovery is in progress, resets the vif's TX state,
 * frees its rate policies / KLV template / AP keys, unlinks it from
 * wlvif_list, adjusts sleep-auth when the last AP goes away while STAs
 * remain, and finally (mutex dropped) cancels the vif's timer and works.
 */
2597 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2598 struct ieee80211_vif *vif,
2599 bool reset_tx_queues)
2601 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2603 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2605 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2607 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2610 /* because of hardware recovery, we may get here twice */
2611 if (wl->state == WLCORE_STATE_OFF)
2614 wl1271_info("down");
2616 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2617 wl->scan_wlvif == wlvif) {
2619 * Rearm the tx watchdog just before idling scan. This
2620 * prevents just-finished scans from triggering the watchdog
2622 wl12xx_rearm_tx_watchdog_locked(wl);
2624 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2625 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2626 wl->scan_wlvif = NULL;
2627 wl->scan.req = NULL;
2628 ieee80211_scan_completed(wl->hw, true);
2631 if (wl->sched_vif == wlvif)
2632 wl->sched_vif = NULL;
2634 if (wl->roc_vif == vif) {
2636 ieee80211_remain_on_channel_expired(wl->hw);
2639 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2640 /* disable active roles */
2641 ret = wl1271_ps_elp_wakeup(wl);
2645 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2646 wlvif->bss_type == BSS_TYPE_IBSS) {
2647 if (wl12xx_dev_role_started(wlvif))
2648 wl12xx_stop_dev(wl, wlvif);
2651 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2655 wl1271_ps_elp_sleep(wl);
2658 wl12xx_tx_reset_wlvif(wl, wlvif);
2660 /* clear all hlids (except system_hlid) */
2661 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2663 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2664 wlvif->bss_type == BSS_TYPE_IBSS) {
2665 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2666 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2667 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2668 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2669 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2671 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2672 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2673 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2674 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2675 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2676 wl12xx_free_rate_policy(wl,
2677 &wlvif->ap.ucast_rate_idx[i]);
2678 wl1271_free_ap_keys(wl, wlvif);
2681 dev_kfree_skb(wlvif->probereq);
2682 wlvif->probereq = NULL;
2683 if (wl->last_wlvif == wlvif)
2684 wl->last_wlvif = NULL;
2685 list_del(&wlvif->list);
2686 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2687 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2688 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2696 * Last AP, have more stations. Configure sleep auth according to STA.
2697 * Don't do this on unintended recovery.
2699 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2700 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2703 if (wl->ap_count == 0 && is_ap) {
2704 /* mask ap events */
2705 wl->event_mask &= ~wl->ap_event_mask;
2706 wl1271_event_unmask(wl);
2709 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2710 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2711 /* Configure for power according to debugfs */
2712 if (sta_auth != WL1271_PSM_ILLEGAL)
2713 wl1271_acx_sleep_auth(wl, sta_auth);
2714 /* Configure for ELP power saving */
2716 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex: the works being cancelled below take it themselves */
2720 mutex_unlock(&wl->mutex);
2722 del_timer_sync(&wlvif->rx_streaming_timer);
2723 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2724 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2725 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2726 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2727 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2729 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface callback: tear down the given vif under
 * wl->mutex, then force a firmware switch (intended recovery) if the
 * remaining vif mix requires a different fw type.
 * NOTE(review): several error-path/brace lines appear elided in this excerpt.
 */
2732 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2733 struct ieee80211_vif *vif)
2735 struct wl1271 *wl = hw->priv;
2736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2737 struct wl12xx_vif *iter;
2738 struct vif_counter_data vif_count;
/* snapshot vif counts before taking the mutex */
2740 wl12xx_get_vif_count(hw, vif, &vif_count);
2741 mutex_lock(&wl->mutex);
/* nothing to do if the hw is off or the vif was never initialized */
2743 if (wl->state == WLCORE_STATE_OFF ||
2744 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2748 * wl->vif can be null here if someone shuts down the interface
2749 * just when hardware recovery has been started.
/* only remove the vif if it is still on the driver's list */
2751 wl12xx_for_each_wlvif(wl, iter) {
2755 __wl1271_op_remove_interface(wl, vif, true);
2758 WARN_ON(iter != wlvif);
/* remaining vifs need a different fw: recover on purpose */
2759 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2760 wl12xx_force_active_psm(wl);
2761 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2762 wl12xx_queue_recovery_work(wl);
2765 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface callback: implemented as remove + re-add of
 * the vif with the new type; the VIF_CHANGE_IN_PROGRESS flag brackets the
 * sequence so other paths can tell this is not a real teardown.
 */
2768 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2769 struct ieee80211_vif *vif,
2770 enum nl80211_iftype new_type, bool p2p)
2772 struct wl1271 *wl = hw->priv;
2775 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2776 wl1271_op_remove_interface(hw, vif);
/* mutate the vif type in place, then bring it back up */
2778 vif->type = new_type;
2780 ret = wl1271_op_add_interface(hw, vif);
2782 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue a role_start (STA or IBSS) for this vif. A JOIN clears the
 * chipset's WPA/WPA2 keys, so encryption_type is reset to KEY_NONE here.
 */
2786 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2789 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2792 * One of the side effects of the JOIN command is that it clears
2793 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2794 * to a WPA/WPA2 access point will therefore kill the data-path.
2795 * Currently the only valid scenario for JOIN during association
2796 * is on roaming, in which case we will also be given new keys.
2797 * Keep the below message for now, unless it starts bothering
2798 * users who really like to roam a lot :)
2800 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2801 wl1271_info("JOIN while associated.");
2803 /* clear encryption type */
2804 wlvif->encryption_type = KEY_NONE;
2807 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2809 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2811 * TODO: this is an ugly workaround for wl12xx fw
2812 * bug - we are not able to tx/rx after the first
2813 * start_sta, so make dummy start+stop calls,
2814 * and then call start_sta again.
2815 * this should be fixed in the fw.
/* dummy start/stop; return values deliberately ignored */
2817 wl12xx_cmd_role_start_sta(wl, wlvif);
2818 wl12xx_cmd_role_stop_sta(wl, wlvif);
2821 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame (starting at @offset into
 * skb->data) and cache it in wlvif->ssid / wlvif->ssid_len.
 * Fails if the IE is absent or longer than IEEE80211_MAX_SSID_LEN.
 */
2827 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2831 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2835 wl1271_error("No SSID in IEs!");
2840 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2841 wl1271_error("SSID is too long!");
2845 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id + length byte) */
2846 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Refresh wlvif's cached SSID from the AP probe request mac80211 built
 * for this vif. STA vifs only.
 */
2850 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2852 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2853 struct sk_buff *skb;
2856 /* we currently only support setting the ssid from the ap probe req */
2857 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2860 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs start after the fixed probe-request header */
2864 ieoffset = offsetof(struct ieee80211_mgmt,
2865 u.probe_req.variable);
2866 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Post-association setup for a STA vif: cache bss_conf parameters, build
 * the ps-poll / probe-request / null-data templates, enable connection
 * monitoring and keep-alive, and sync rate policies and PS mode with fw.
 * NOTE(review): error-check lines between the commands are elided here;
 * upstream the command ordering below is significant (see comments).
 */
2872 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2873 struct ieee80211_bss_conf *bss_conf,
2879 wlvif->aid = bss_conf->aid;
2880 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2881 wlvif->beacon_int = bss_conf->beacon_int;
2882 wlvif->wmm_enabled = bss_conf->qos;
2884 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2887 * with wl1271, we don't need to update the
2888 * beacon_int and dtim_period, because the firmware
2889 * updates it by itself when the first beacon is
2890 * received after a join.
2892 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2897 * Get a template for hardware connection maintenance
/* drop any stale probe-request template before rebuilding it */
2899 dev_kfree_skb(wlvif->probereq);
2900 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2903 ieoffset = offsetof(struct ieee80211_mgmt,
2904 u.probe_req.variable);
2905 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2907 /* enable the connection monitoring feature */
2908 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2913 * The join command disables the keep-alive mode, shuts down its process,
2914 * and also clears the template config, so we need to reset it all after
2915 * the join. The acx_aid starts the keep-alive process, and the order
2916 * of the commands below is relevant.
2918 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2922 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2926 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2930 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2931 wlvif->sta.klv_template_id,
2932 ACX_KEEP_ALIVE_TPL_VALID);
2937 * The default fw psm configuration is AUTO, while mac80211 default
2938 * setting is off (ACTIVE), so sync the fw with the correct value.
2940 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2946 wl1271_tx_enabled_rates_get(wl,
2949 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear association/IBSS-joined state, free the
 * probe-request template, disable connection monitoring, keep-alive and
 * beacon filtering, and abort any channel switch in progress.
 */
2957 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2960 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2962 /* make sure we are connected (sta) joined */
2964 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2967 /* make sure we are joined (ibss) */
2969 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2973 /* use defaults when not associated */
2976 /* free probe-request template */
2977 dev_kfree_skb(wlvif->probereq);
2978 wlvif->probereq = NULL;
2980 /* disable connection monitor features */
2981 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2985 /* Disable the keep-alive feature */
2986 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2990 /* disable beacon filtering */
2991 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* abort a pending channel switch and tell mac80211 it failed */
2996 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2997 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2999 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3000 ieee80211_chswitch_done(vif, false);
3001 cancel_delayed_work(&wlvif->channel_switch_work);
3004 /* invalidate keep-alive template */
3005 wl1271_acx_keep_alive_config(wl, wlvif,
3006 wlvif->sta.klv_template_id,
3007 ACX_KEEP_ALIVE_TPL_INVALID);
3012 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3014 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3015 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track a STA vif's idle state via WLVIF_FLAG_ACTIVE. On going idle, the
 * sched scan on this vif is stopped (fw only supports sched_scan in idle).
 * No-op when the requested state matches the current one.
 */
3018 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3021 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3023 if (idle == cur_idle)
3027 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3029 /* The current firmware only supports sched_scan in idle */
3030 if (wl->sched_vif == wlvif)
3031 wl->ops->sched_scan_stop(wl, wlvif);
3033 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply hw-config changes to a single vif. Currently only pushes a changed
 * tx power level to the fw and caches it in wlvif->power_level.
 */
3037 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3038 struct ieee80211_conf *conf, u32 changed)
3042 if (conf->power_level != wlvif->power_level) {
3043 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache only after the fw accepted the new level */
3047 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback: record the global power level and apply the
 * changed settings to every vif, bracketed by ELP wakeup/sleep.
 */
3053 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3055 struct wl1271 *wl = hw->priv;
3056 struct wl12xx_vif *wlvif;
3057 struct ieee80211_conf *conf = &hw->conf;
3060 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3062 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3064 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3067 mutex_lock(&wl->mutex);
/* remember the requested power level even if the hw is not on yet */
3069 if (changed & IEEE80211_CONF_CHANGE_POWER)
3070 wl->power_level = conf->power_level;
3072 if (unlikely(wl->state != WLCORE_STATE_ON))
3075 ret = wl1271_ps_elp_wakeup(wl);
3079 /* configure each interface */
3080 wl12xx_for_each_wlvif(wl, wlvif) {
3081 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3087 wl1271_ps_elp_sleep(wl);
3090 mutex_unlock(&wl->mutex);
/*
 * Snapshot of the multicast filter list, built in atomic context by
 * prepare_multicast() and consumed (then freed) by configure_filter().
 */
3095 struct wl1271_filter_params {
3098 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback: copy the mc address list into a
 * kzalloc'd wl1271_filter_params (GFP_ATOMIC - may run in atomic context)
 * and hand it back encoded as a u64 cookie for configure_filter().
 * If the list exceeds ACX_MC_ADDRESS_GROUP_MAX, filtering is disabled.
 */
3101 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3102 struct netdev_hw_addr_list *mc_list)
3104 struct wl1271_filter_params *fp;
3105 struct netdev_hw_addr *ha;
3107 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3109 wl1271_error("Out of memory setting filters.");
3113 /* update multicast filtering parameters */
3114 fp->mc_list_length = 0;
3115 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3116 fp->enabled = false;
3119 netdev_hw_addr_list_for_each(ha, mc_list) {
3120 memcpy(fp->mc_list[fp->mc_list_length],
3121 ha->addr, ETH_ALEN);
3122 fp->mc_list_length++;
/* ownership of fp transfers to configure_filter() via this cookie */
3126 return (u64)(unsigned long)fp;
/* RX filter flags this driver actually honors in configure_filter() */
3129 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3132 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback: push the multicast group address
 * table (from the prepare_multicast cookie) to each non-AP vif; other
 * filters cannot be programmed - the fw derives them from roles/ROC.
 */
3136 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3137 unsigned int changed,
3138 unsigned int *total, u64 multicast)
/* decode the cookie produced by wl1271_op_prepare_multicast() */
3140 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3141 struct wl1271 *wl = hw->priv;
3142 struct wl12xx_vif *wlvif;
3146 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3147 " total %x", changed, *total);
3149 mutex_lock(&wl->mutex);
3151 *total &= WL1271_SUPPORTED_FILTERS;
3152 changed &= WL1271_SUPPORTED_FILTERS;
3154 if (unlikely(wl->state != WLCORE_STATE_ON))
3157 ret = wl1271_ps_elp_wakeup(wl);
3161 wl12xx_for_each_wlvif(wl, wlvif) {
3162 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* FIF_ALLMULTI: pass everything, so clear the group table */
3163 if (*total & FIF_ALLMULTI)
3164 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3168 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3171 fp->mc_list_length);
3178 * the fw doesn't provide an api to configure the filters. instead,
3179 * the filters configuration is based on the active roles / ROC
3184 wl1271_ps_elp_sleep(wl);
3187 mutex_unlock(&wl->mutex);
/*
 * Buffer an AP key in wlvif->ap.recorded_keys[] for later programming by
 * wl1271_ap_init_hwenc() (keys set before the AP role is started cannot
 * go straight to the fw). Rejects oversized keys and duplicate ids.
 */
3191 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3192 u8 id, u8 key_type, u8 key_size,
3193 const u8 *key, u8 hlid, u32 tx_seq_32,
3196 struct wl1271_ap_key *ap_key;
3199 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3201 if (key_size > MAX_KEY_SIZE)
3205 * Find next free entry in ap_keys. Also check we are not replacing
3208 for (i = 0; i < MAX_NUM_KEYS; i++) {
3209 if (wlvif->ap.recorded_keys[i] == NULL)
3212 if (wlvif->ap.recorded_keys[i]->id == id) {
3213 wl1271_warning("trying to record key replacement");
/* table full - no free slot found */
3218 if (i == MAX_NUM_KEYS)
3221 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3226 ap_key->key_type = key_type;
3227 ap_key->key_size = key_size;
3228 memcpy(ap_key->key, key, key_size);
3229 ap_key->hlid = hlid;
3230 ap_key->tx_seq_32 = tx_seq_32;
3231 ap_key->tx_seq_16 = tx_seq_16;
/* slot i takes ownership; freed in wl1271_free_ap_keys() */
3233 wlvif->ap.recorded_keys[i] = ap_key;
3237 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3241 for (i = 0; i < MAX_NUM_KEYS; i++) {
3242 kfree(wlvif->ap.recorded_keys[i]);
3243 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Program all keys recorded before AP start into the fw. A recorded key
 * without a valid hlid goes to the broadcast link. If any WEP key was
 * added, the default WEP key is (re)selected on the broadcast link.
 * The recorded keys are freed at the end regardless of outcome.
 */
3247 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3250 struct wl1271_ap_key *key;
3251 bool wep_key_added = false;
3253 for (i = 0; i < MAX_NUM_KEYS; i++) {
3255 if (wlvif->ap.recorded_keys[i] == NULL)
3258 key = wlvif->ap.recorded_keys[i];
/* keys recorded without a station map to the broadcast link */
3260 if (hlid == WL12XX_INVALID_LINK_ID)
3261 hlid = wlvif->ap.bcast_hlid;
3263 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3264 key->id, key->key_type,
3265 key->key_size, key->key,
3266 hlid, key->tx_seq_32,
3271 if (key->key_type == KEY_WEP)
3272 wep_key_added = true;
3275 if (wep_key_added) {
3276 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3277 wlvif->ap.bcast_hlid);
3283 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming, split by role:
 *  - AP: record the key if the AP role isn't started yet, otherwise send
 *    it to the fw (per-sta hlid, or broadcast hlid when @sta is NULL);
 *  - STA: send a per-address key, silently ignoring removals the hw
 *    cannot perform (unicast keys clear automatically on next JOIN).
 */
3287 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3288 u16 action, u8 id, u8 key_type,
3289 u8 key_size, const u8 *key, u32 tx_seq_32,
3290 u16 tx_seq_16, struct ieee80211_sta *sta)
3293 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3296 struct wl1271_station *wl_sta;
3300 wl_sta = (struct wl1271_station *)sta->drv_priv;
3301 hlid = wl_sta->hlid;
3303 hlid = wlvif->ap.bcast_hlid;
3306 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3308 * We do not support removing keys after AP shutdown.
3309 * Pretend we do to make mac80211 happy.
3311 if (action != KEY_ADD_OR_REPLACE)
3314 ret = wl1271_record_ap_key(wl, wlvif, id,
3316 key, hlid, tx_seq_32,
3319 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3320 id, key_type, key_size,
3321 key, hlid, tx_seq_32,
3329 static const u8 bcast_addr[ETH_ALEN] = {
3330 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3333 addr = sta ? sta->addr : bcast_addr;
3335 if (is_zero_ether_addr(addr)) {
3336 /* We don't support TX only encryption */
3340 /* The wl1271 does not allow to remove unicast keys - they
3341 will be cleared automatically on next CMD_JOIN. Ignore the
3342 request silently, as we don't want the mac80211 to emit
3343 an error message. */
3344 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3347 /* don't remove key if hlid was already deleted */
3348 if (action == KEY_REMOVE &&
3349 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3352 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3353 id, key_type, key_size,
3354 key, addr, tx_seq_32,
/*
 * mac80211 .set_key callback: for GEM/TKIP ciphers (different spare-block
 * accounting) stop and flush the tx queues first, then delegate to the
 * chip-specific set_key op under wl->mutex with ELP awake.
 */
3364 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3365 struct ieee80211_vif *vif,
3366 struct ieee80211_sta *sta,
3367 struct ieee80211_key_conf *key_conf)
3369 struct wl1271 *wl = hw->priv;
3371 bool might_change_spare =
3372 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3373 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3375 if (might_change_spare) {
3377 * stop the queues and flush to ensure the next packets are
3378 * in sync with FW spare block accounting
3380 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3381 wl1271_tx_flush(wl);
3384 mutex_lock(&wl->mutex);
3386 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3388 goto out_wake_queues;
3391 ret = wl1271_ps_elp_wakeup(wl);
3393 goto out_wake_queues;
/* chip-family specific hook (wl12xx/wl18xx) */
3395 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3397 wl1271_ps_elp_sleep(wl);
/* re-enable anything stopped for spare-block sync above */
3400 if (might_change_spare)
3401 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3403 mutex_unlock(&wl->mutex);
/*
 * Shared set_key implementation (exported for chip drivers): resolve the
 * target hlid, seed tx sequence counters from the link's freed-packet
 * count, map the cipher suite to a fw key type, and dispatch
 * add/replace vs. remove. Rebuilds the ARP response template when the
 * unicast/common encryption type changes on a STA vif.
 */
3408 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3409 struct ieee80211_vif *vif,
3410 struct ieee80211_sta *sta,
3411 struct ieee80211_key_conf *key_conf)
3413 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3420 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3422 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3423 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3424 key_conf->cipher, key_conf->keyidx,
3425 key_conf->keylen, key_conf->flags);
3426 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3428 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3430 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3431 hlid = wl_sta->hlid;
3433 hlid = wlvif->ap.bcast_hlid;
3436 hlid = wlvif->sta.hlid;
/* carry the tx sequence number across key replacement */
3438 if (hlid != WL12XX_INVALID_LINK_ID) {
3439 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3440 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3441 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3444 switch (key_conf->cipher) {
3445 case WLAN_CIPHER_SUITE_WEP40:
3446 case WLAN_CIPHER_SUITE_WEP104:
3449 key_conf->hw_key_idx = key_conf->keyidx;
3451 case WLAN_CIPHER_SUITE_TKIP:
3452 key_type = KEY_TKIP;
3453 key_conf->hw_key_idx = key_conf->keyidx;
3455 case WLAN_CIPHER_SUITE_CCMP:
/* hw inserts the IV; mac80211 must leave space for it */
3457 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3459 case WL1271_CIPHER_SUITE_GEM:
3463 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3470 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3471 key_conf->keyidx, key_type,
3472 key_conf->keylen, key_conf->key,
3473 tx_seq_32, tx_seq_16, sta);
3475 wl1271_error("Could not add or replace key");
3480 * reconfiguring arp response if the unicast (or common)
3481 * encryption key type was changed
3483 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3484 (sta || key_type == KEY_WEP) &&
3485 wlvif->encryption_type != key_type) {
3486 wlvif->encryption_type = key_type;
3487 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3489 wl1271_warning("build arp rsp failed: %d", ret);
3496 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3497 key_conf->keyidx, key_type,
3498 key_conf->keylen, key_conf->key,
3501 wl1271_error("Could not remove key");
3507 wl1271_error("Unsupported key cmd 0x%x", cmd);
3513 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key callback: remember the default key
 * index and, when WEP is in use, push the new default key to the fw.
 */
3515 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3516 struct ieee80211_vif *vif,
3519 struct wl1271 *wl = hw->priv;
3520 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3523 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3526 /* we don't handle unsetting of default key */
3530 mutex_lock(&wl->mutex);
3532 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3537 ret = wl1271_ps_elp_wakeup(wl);
3541 wlvif->default_key = key_idx;
3543 /* the default WEP key needs to be configured at least once */
3544 if (wlvif->encryption_type == KEY_WEP) {
3545 ret = wl12xx_cmd_set_default_wep_key(wl,
3553 wl1271_ps_elp_sleep(wl);
3556 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to the fw (only on hardware with
 * the REGDOMAIN_CONF quirk). A failed fw command triggers recovery.
 */
3559 void wlcore_regdomain_config(struct wl1271 *wl)
3563 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3566 mutex_lock(&wl->mutex);
3568 if (unlikely(wl->state != WLCORE_STATE_ON))
3571 ret = wl1271_ps_elp_wakeup(wl);
3575 ret = wlcore_cmd_regdomain_config_locked(wl);
/* fw rejected the regdomain update - recover rather than run stale */
3577 wl12xx_queue_recovery_work(wl);
3581 wl1271_ps_elp_sleep(wl);
3583 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan callback: start a one-shot scan unless some role is
 * currently on a remain-on-channel (scanning is disallowed during ROC).
 */
3586 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3587 struct ieee80211_vif *vif,
3588 struct ieee80211_scan_request *hw_req)
3590 struct cfg80211_scan_request *req = &hw_req->req;
3591 struct wl1271 *wl = hw->priv;
3596 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first requested SSID is used by the fw scan */
3599 ssid = req->ssids[0].ssid;
3600 len = req->ssids[0].ssid_len;
3603 mutex_lock(&wl->mutex);
3605 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3607 * We cannot return -EBUSY here because cfg80211 will expect
3608 * a call to ieee80211_scan_completed if we do - in this case
3609 * there won't be any call.
3615 ret = wl1271_ps_elp_wakeup(wl);
3619 /* fail if there is any role in ROC */
3620 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3621 /* don't allow scanning right now */
3626 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3628 wl1271_ps_elp_sleep(wl);
3630 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback: abort a running scan, reset the scan
 * state machine, notify mac80211 (aborted=true), and cancel the deferred
 * scan-complete work outside the mutex.
 */
3635 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3636 struct ieee80211_vif *vif)
3638 struct wl1271 *wl = hw->priv;
3639 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3642 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3644 mutex_lock(&wl->mutex);
3646 if (unlikely(wl->state != WLCORE_STATE_ON))
3649 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3652 ret = wl1271_ps_elp_wakeup(wl);
3656 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3657 ret = wl->ops->scan_stop(wl, wlvif);
3663 * Rearm the tx watchdog just before idling scan. This
3664 * prevents just-finished scans from triggering the watchdog
3666 wl12xx_rearm_tx_watchdog_locked(wl);
3668 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3669 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3670 wl->scan_wlvif = NULL;
3671 wl->scan.req = NULL;
/* 'true' = scan was aborted */
3672 ieee80211_scan_completed(wl->hw, true);
3675 wl1271_ps_elp_sleep(wl);
3677 mutex_unlock(&wl->mutex);
/* must run unlocked: the work itself takes wl->mutex */
3679 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: start a fw-driven scheduled scan
 * on this vif and record it as the active sched-scan vif.
 */
3682 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3683 struct ieee80211_vif *vif,
3684 struct cfg80211_sched_scan_request *req,
3685 struct ieee80211_scan_ies *ies)
3687 struct wl1271 *wl = hw->priv;
3688 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3691 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3693 mutex_lock(&wl->mutex);
3695 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3700 ret = wl1271_ps_elp_wakeup(wl);
3704 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3708 wl->sched_vif = wlvif;
3711 wl1271_ps_elp_sleep(wl);
3713 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: tell the fw to stop the scheduled
 * scan on this vif.
 */
3717 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3718 struct ieee80211_vif *vif)
3720 struct wl1271 *wl = hw->priv;
3721 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3724 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3726 mutex_lock(&wl->mutex);
3728 if (unlikely(wl->state != WLCORE_STATE_ON))
3731 ret = wl1271_ps_elp_wakeup(wl);
3735 wl->ops->sched_scan_stop(wl, wlvif);
3737 wl1271_ps_elp_sleep(wl);
3739 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: push the new fragmentation
 * threshold to the fw (global, not per-vif).
 */
3744 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3746 struct wl1271 *wl = hw->priv;
3749 mutex_lock(&wl->mutex);
3751 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3756 ret = wl1271_ps_elp_wakeup(wl);
3760 ret = wl1271_acx_frag_threshold(wl, value);
3762 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3764 wl1271_ps_elp_sleep(wl);
3767 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: RTS threshold is a per-role
 * setting in this fw, so apply it to every vif.
 */
3772 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3774 struct wl1271 *wl = hw->priv;
3775 struct wl12xx_vif *wlvif;
3778 mutex_lock(&wl->mutex);
3780 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3785 ret = wl1271_ps_elp_wakeup(wl);
3789 wl12xx_for_each_wlvif(wl, wlvif) {
3790 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3792 wl1271_warning("set rts threshold failed: %d", ret);
3794 wl1271_ps_elp_sleep(wl);
3797 mutex_unlock(&wl->mutex);
/*
 * Strip the first information element with id @eid from the frame in
 * @skb (IEs start at @ieoffset), shifting the remaining data down and
 * trimming the skb accordingly.
 */
3802 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3805 const u8 *next, *end = skb->data + skb->len;
3806 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3807 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3812 memmove(ie, next, end - next);
3813 skb_trim(skb, skb->len - len);
/*
 * Like wl12xx_remove_ie(), but matches a vendor-specific IE by OUI and
 * OUI type instead of element id.
 */
3816 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3817 unsigned int oui, u8 oui_type,
3821 const u8 *next, *end = skb->data + skb->len;
3822 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3823 skb->data + ieoffset,
3824 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3829 memmove(ie, next, end - next);
3830 skb_trim(skb, skb->len - len);
/*
 * Install the mac80211-provided probe response as the AP probe-response
 * template and mark it as explicitly set (so beacon-derived templates
 * won't overwrite it later).
 */
3833 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3834 struct ieee80211_vif *vif)
3836 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3837 struct sk_buff *skb;
3840 skb = ieee80211_proberesp_get(wl->hw, vif);
3844 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3845 CMD_TEMPL_AP_PROBE_RESPONSE,
3854 wl1271_debug(DEBUG_AP, "probe response updated");
3855 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template path: if the vif has no cached SSID
 * (hidden SSID case), rebuild the template by splicing the SSID from
 * bss_conf into the beacon-derived probe response data.
 */
3861 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3862 struct ieee80211_vif *vif,
3864 size_t probe_rsp_len,
3867 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3868 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3869 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3870 int ssid_ie_offset, ie_offset, templ_len;
3873 /* no need to change probe response if the SSID is set correctly */
3874 if (wlvif->ssid_len > 0)
3875 return wl1271_cmd_template_set(wl, wlvif->role_id,
3876 CMD_TEMPL_AP_PROBE_RESPONSE,
3881 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3882 wl1271_error("probe_rsp template too big");
3886 /* start searching from IE offset */
3887 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3889 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3890 probe_rsp_len - ie_offset);
3892 wl1271_error("No SSID in beacon!");
3896 ssid_ie_offset = ptr - probe_rsp_data;
/* skip past the (empty) SSID IE in the source frame */
3897 ptr += (ptr[1] + 2);
/* copy everything before the SSID IE */
3899 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3901 /* insert SSID from bss_conf */
3902 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3903 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3904 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3905 bss_conf->ssid, bss_conf->ssid_len);
3906 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the IEs that followed the original SSID IE */
3908 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3909 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3910 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3912 return wl1271_cmd_template_set(wl, wlvif->role_id,
3913 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss changes: slot time, preamble length and CTS
 * protection, pushed to the fw per changed flag.
 */
3919 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3920 struct ieee80211_vif *vif,
3921 struct ieee80211_bss_conf *bss_conf,
3924 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3927 if (changed & BSS_CHANGED_ERP_SLOT) {
3928 if (bss_conf->use_short_slot)
3929 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3931 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3933 wl1271_warning("Set slot time failed %d", ret);
3938 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
/* preamble failures are not treated as fatal (no ret check) */
3939 if (bss_conf->use_short_preamble)
3940 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3942 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3945 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3946 if (bss_conf->use_cts_prot)
3947 ret = wl1271_acx_cts_protect(wl, wlvif,
3950 ret = wl1271_acx_cts_protect(wl, wlvif,
3951 CTSPROTECT_DISABLE);
3953 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Build and install the beacon template for this vif, then - unless a
 * probe response was already set explicitly by userspace - derive a
 * probe-response template from the beacon (stripping the TIM and P2P
 * IEs and rewriting the frame control field).
 */
3962 static int wlcore_set_beacon_template(struct wl1271 *wl,
3963 struct ieee80211_vif *vif,
3966 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3967 struct ieee80211_hdr *hdr;
3970 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3971 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3979 wl1271_debug(DEBUG_MASTER, "beacon updated");
/* cache the SSID from the beacon for filtering etc. */
3981 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3983 dev_kfree_skb(beacon);
3986 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3987 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3989 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3994 dev_kfree_skb(beacon);
/* WMM is on iff the beacon carries the Microsoft WMM vendor IE */
3998 wlvif->wmm_enabled =
3999 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4000 WLAN_OUI_TYPE_MICROSOFT_WMM,
4001 beacon->data + ieoffset,
4002 beacon->len - ieoffset);
4005 * In case we already have a probe-resp beacon set explicitly
4006 * by usermode, don't use the beacon data.
4008 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4011 /* remove TIM ie from probe response */
4012 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4015 * remove p2p ie from probe response.
4016 * the fw responds to probe requests that don't include
4017 * the p2p ie. probe requests with p2p ie will be passed,
4018 * and will be responded by the supplicant (the spec
4019 * forbids including the p2p ie when responding to probe
4020 * requests that didn't include it).
4022 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4023 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon into a probe response frame */
4025 hdr = (struct ieee80211_hdr *) beacon->data;
4026 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4027 IEEE80211_STYPE_PROBE_RESP);
4029 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4034 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4035 CMD_TEMPL_PROBE_RESPONSE,
4040 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss changes: cache a new beacon interval, refresh
 * the AP probe-response template, and rebuild the beacon template.
 */
4048 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4049 struct ieee80211_vif *vif,
4050 struct ieee80211_bss_conf *bss_conf,
4053 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4054 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4057 if (changed & BSS_CHANGED_BEACON_INT) {
4058 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4059 bss_conf->beacon_int);
4061 wlvif->beacon_int = bss_conf->beacon_int;
4064 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4065 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4067 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4070 if (changed & BSS_CHANGED_BEACON) {
4071 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4078 wl1271_error("beacon info change failed: %d", ret);
/* AP mode changes */
/*
 * Handle bss_info changes for an AP vif: basic-rate updates (which force
 * re-initializing rate policies and all templates), beacon changes,
 * AP start/stop on BEACON_ENABLED, ERP settings and HT information.
 */
4083 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4084 struct ieee80211_vif *vif,
4085 struct ieee80211_bss_conf *bss_conf,
4088 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4091 if (changed & BSS_CHANGED_BASIC_RATES) {
4092 u32 rates = bss_conf->basic_rates;
4094 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4096 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4097 wlvif->basic_rate_set);
4099 ret = wl1271_init_ap_rates(wl, wlvif);
4101 wl1271_error("AP rate policy change failed %d", ret);
/* rate change invalidates all templates - rebuild them */
4105 ret = wl1271_ap_init_templates(wl, vif);
4109 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4113 ret = wlcore_set_beacon_template(wl, vif, true);
4118 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4122 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4123 if (bss_conf->enable_beacon) {
4124 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4125 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* flush the keys recorded before AP start into the fw */
4129 ret = wl1271_ap_init_hwenc(wl, wlvif);
4133 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4134 wl1271_debug(DEBUG_AP, "started AP");
4137 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4139 * AP might be in ROC in case we have just
4140 * sent auth reply. handle it.
4142 if (test_bit(wlvif->role_id, wl->roc_map))
4143 wl12xx_croc(wl, wlvif->role_id);
4145 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4149 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4150 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4152 wl1271_debug(DEBUG_AP, "stopped AP");
4157 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4161 /* Handle HT information change */
4162 if ((changed & BSS_CHANGED_HT) &&
4163 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4164 ret = wl1271_acx_set_ht_information(wl, wlvif,
4165 bss_conf->ht_operation_mode)
4167 wl1271_warning("Set ht information failed %d", ret);
/*
 * Configure a STA vif for a new BSSID: cache beacon interval and rate
 * sets, stop any sched scan (unsupported while connected), push rate
 * policies, rebuild null-data templates, refresh the SSID cache and mark
 * the vif in use.
 */
4176 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4177 struct ieee80211_bss_conf *bss_conf,
4183 wl1271_debug(DEBUG_MAC80211,
4184 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4185 bss_conf->bssid, bss_conf->aid,
4186 bss_conf->beacon_int,
4187 bss_conf->basic_rates, sta_rate_set);
4189 wlvif->beacon_int = bss_conf->beacon_int;
4190 rates = bss_conf->basic_rates;
4191 wlvif->basic_rate_set =
4192 wl1271_tx_enabled_rates_get(wl, rates,
4195 wl1271_tx_min_rate_get(wl,
4196 wlvif->basic_rate_set);
4200 wl1271_tx_enabled_rates_get(wl,
4204 /* we only support sched_scan while not connected */
4205 if (wl->sched_vif == wlvif)
4206 wl->ops->sched_scan_stop(wl, wlvif);
4208 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4212 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4216 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4220 wlcore_set_ssid(wl, wlvif);
4222 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, stop
 * the STA role if it was running, and clear the in-use flag.
 */
4227 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4231 /* revert back to minimum rates for the current band */
4232 wl1271_set_band_rate(wl, wlvif);
4233 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4235 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4239 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4240 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4241 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4246 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4249 /* STA/IBSS mode changes */
/*
 * Dispatch mac80211 BSS_CHANGED_* notifications for STA and IBSS vifs.
 * Handles (in order): beacon info, IBSS join/leave, idle, CQM triggers,
 * BSSID set/clear, IBSS rates, beacon filtering, ERP info, join,
 * (de)association, power-save transitions, HT peer capabilities and
 * ARP filtering.  Ordering matters: HT and ARP handling must happen
 * after the join, as noted in the inline comments below.
 * Called with wl->mutex held and the FW awake (via the caller,
 * wl1271_op_bss_info_changed).
 * NOTE(review): many statements are elided in this view.
 */
4250 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4251 struct ieee80211_vif *vif,
4252 struct ieee80211_bss_conf *bss_conf,
4255 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4256 bool do_join = false;
4257 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4258 bool ibss_joined = false;
4259 u32 sta_rate_set = 0;
4261 struct ieee80211_sta *sta;
4262 bool sta_exists = false;
4263 struct ieee80211_sta_ht_cap sta_ht_cap;
4266 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4272 if (changed & BSS_CHANGED_IBSS) {
4273 if (bss_conf->ibss_joined) {
4274 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: drop the association state and stop the STA role */
4277 wlcore_unset_assoc(wl, wlvif);
4278 wl12xx_cmd_role_stop_sta(wl, wlvif);
4282 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4285 /* Need to update the SSID (for filtering etc) */
4286 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4289 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4290 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4291 bss_conf->enable_beacon ? "enabled" : "disabled");
4296 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4297 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: arm/disarm the RSSI trigger in FW */
4299 if (changed & BSS_CHANGED_CQM) {
4300 bool enable = false;
4301 if (bss_conf->cqm_rssi_thold)
4303 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4304 bss_conf->cqm_rssi_thold,
4305 bss_conf->cqm_rssi_hyst);
4308 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4311 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4312 BSS_CHANGED_ASSOC)) {
4314 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4316 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4318 /* save the supp_rates of the ap */
4319 sta_rate_set = sta->supp_rates[wlvif->band];
4320 if (sta->ht_cap.ht_supported)
4322 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4323 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4324 sta_ht_cap = sta->ht_cap;
4331 if (changed & BSS_CHANGED_BSSID) {
4332 if (!is_zero_ether_addr(bss_conf->bssid)) {
4333 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4338 /* Need to update the BSSID (for filtering etc) */
/* all-zero BSSID means the BSS was torn down */
4341 ret = wlcore_clear_bssid(wl, wlvif);
4347 if (changed & BSS_CHANGED_IBSS) {
4348 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4349 bss_conf->ibss_joined);
4351 if (bss_conf->ibss_joined) {
4352 u32 rates = bss_conf->basic_rates;
4353 wlvif->basic_rate_set =
4354 wl1271_tx_enabled_rates_get(wl, rates,
4357 wl1271_tx_min_rate_get(wl,
4358 wlvif->basic_rate_set);
4360 /* by default, use 11b + OFDM rates */
4361 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4362 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4368 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4369 /* enable beacon filtering */
4370 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4375 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4380 ret = wlcore_join(wl, wlvif);
4382 wl1271_warning("cmd join failed %d", ret);
4387 if (changed & BSS_CHANGED_ASSOC) {
4388 if (bss_conf->assoc) {
4389 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* re-send the authorized state if mac80211 set it before assoc */
4394 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4395 wl12xx_set_authorized(wl, wlvif);
4397 wlcore_unset_assoc(wl, wlvif);
4401 if (changed & BSS_CHANGED_PS) {
4402 if ((bss_conf->ps) &&
4403 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4404 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* conf.conn.forced_ps selects forced vs. automatic power save */
4408 if (wl->conf.conn.forced_ps) {
4409 ps_mode = STATION_POWER_SAVE_MODE;
4410 ps_mode_str = "forced";
4412 ps_mode = STATION_AUTO_PS_MODE;
4413 ps_mode_str = "auto";
4416 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4418 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4420 wl1271_warning("enter %s ps failed %d",
4422 } else if (!bss_conf->ps &&
4423 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4424 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4426 ret = wl1271_ps_set_mode(wl, wlvif,
4427 STATION_ACTIVE_MODE);
4429 wl1271_warning("exit auto ps failed %d", ret);
4433 /* Handle new association with HT. Do this after join. */
4436 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4438 ret = wlcore_hw_set_peer_cap(wl,
4444 wl1271_warning("Set ht cap failed %d", ret);
4450 ret = wl1271_acx_set_ht_information(wl, wlvif,
4451 bss_conf->ht_operation_mode);
4453 wl1271_warning("Set ht information failed %d",
4460 /* Handle arp filtering. Done after join. */
4461 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4462 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4463 __be32 addr = bss_conf->arp_addr_list[0];
4464 wlvif->sta.qos = bss_conf->qos;
4465 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4467 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4468 wlvif->ip_addr = addr;
4470 * The template should have been configured only upon
4471 * association. however, it seems that the correct ip
4472 * isn't being set (when sending), so we have to
4473 * reconfigure the template upon every ip change.
4475 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4477 wl1271_warning("build arp rsp failed: %d", ret);
4481 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4482 (ACX_ARP_FILTER_ARP_FILTERING |
4483 ACX_ARP_FILTER_AUTO_ARP),
/* no single IP / not assoc'ed: disable ARP filtering in FW */
4487 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback.  Performs the work that must
 * happen without wl->mutex (cancelling the connection-loss worker,
 * flushing TX before an AP stops beaconing), then takes the mutex,
 * wakes the FW, applies a TX-power change and delegates to the
 * AP- or STA-specific handler.
 */
4498 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4499 struct ieee80211_vif *vif,
4500 struct ieee80211_bss_conf *bss_conf,
4503 struct wl1271 *wl = hw->priv;
4504 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4505 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4508 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4509 wlvif->role_id, (int)changed);
4512 * make sure to cancel pending disconnections if our association
4515 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4516 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush queued frames before the AP stops beaconing */
4518 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4519 !bss_conf->enable_beacon)
4520 wl1271_tx_flush(wl);
4522 mutex_lock(&wl->mutex);
4524 if (unlikely(wl->state != WLCORE_STATE_ON))
4527 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4530 ret = wl1271_ps_elp_wakeup(wl);
4534 if ((changed & BSS_CHANGED_TXPOWER) &&
4535 bss_conf->txpower != wlvif->power_level) {
4537 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4541 wlvif->power_level = bss_conf->txpower;
4545 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4547 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4549 wl1271_ps_elp_sleep(wl);
4552 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback.  The driver keeps no per-context
 * state; the visible body only logs the channel and type.
 */
4555 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4556 struct ieee80211_chanctx_conf *ctx)
4558 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4559 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4560 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx callback.  No per-context state to tear
 * down; the visible body only logs the channel and type.
 */
4564 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4565 struct ieee80211_chanctx_conf *ctx)
4567 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4568 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4569 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx callback.  The visible body only logs the
 * channel, type and the changed-flags bitmap.
 */
4572 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4573 struct ieee80211_chanctx_conf *ctx,
4576 wl1271_debug(DEBUG_MAC80211,
4577 "mac80211 change chanctx %d (type %d) changed 0x%x",
4578 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4579 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 .assign_vif_chanctx callback.  Record the context's band,
 * channel and channel type on the vif, and reset the vif's default
 * rates for the (possibly new) band.  Protected by wl->mutex.
 */
4582 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4583 struct ieee80211_vif *vif,
4584 struct ieee80211_chanctx_conf *ctx)
4586 struct wl1271 *wl = hw->priv;
4587 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4588 int channel = ieee80211_frequency_to_channel(
4589 ctx->def.chan->center_freq);
4591 wl1271_debug(DEBUG_MAC80211,
4592 "mac80211 assign chanctx (role %d) %d (type %d)",
4593 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4595 mutex_lock(&wl->mutex);
4597 wlvif->band = ctx->def.chan->band;
4598 wlvif->channel = channel;
4599 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4601 /* update default rates according to the band */
4602 wl1271_set_band_rate(wl, wlvif);
4604 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback.  Logs the event and flushes
 * pending TX so no frames remain queued for the departing channel.
 */
4609 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4610 struct ieee80211_vif *vif,
4611 struct ieee80211_chanctx_conf *ctx)
4613 struct wl1271 *wl = hw->priv;
4614 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4616 wl1271_debug(DEBUG_MAC80211,
4617 "mac80211 unassign chanctx (role %d) %d (type %d)",
4619 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4620 cfg80211_get_chandef_type(&ctx->def));
4622 wl1271_tx_flush(wl);
/*
 * mac80211 .conf_tx callback: program EDCA AC parameters (cw_min,
 * cw_max, aifs, txop) and the per-TID configuration for one queue.
 * The PS scheme is chosen per queue (UPSD trigger vs. legacy) before
 * the FW is woken.  Protected by wl->mutex.
 */
4625 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4626 struct ieee80211_vif *vif, u16 queue,
4627 const struct ieee80211_tx_queue_params *params)
4629 struct wl1271 *wl = hw->priv;
4630 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4634 mutex_lock(&wl->mutex);
4636 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4639 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4641 ps_scheme = CONF_PS_SCHEME_LEGACY;
4643 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4646 ret = wl1271_ps_elp_wakeup(wl);
4651 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 below to convert txop to microseconds for the FW */
4654 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4655 params->cw_min, params->cw_max,
4656 params->aifs, params->txop << 5);
4660 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4661 CONF_CHANNEL_TYPE_EDCF,
4662 wl1271_tx_get_queue(queue),
4663 ps_scheme, CONF_ACK_POLICY_LEGACY,
4667 wl1271_ps_elp_sleep(wl);
4670 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: read the current TSF (mactime) from the
 * FW via ACX.  Returns ULLONG_MAX if the read could not be performed
 * (device off or wakeup failed).
 */
4675 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4676 struct ieee80211_vif *vif)
4679 struct wl1271 *wl = hw->priv;
4680 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4681 u64 mactime = ULLONG_MAX;
4684 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4686 mutex_lock(&wl->mutex);
4688 if (unlikely(wl->state != WLCORE_STATE_ON))
4691 ret = wl1271_ps_elp_wakeup(wl);
4695 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4700 wl1271_ps_elp_sleep(wl);
4703 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback.  Only reports the current channel
 * from hw->conf; no per-channel statistics are collected.
 */
4707 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4708 struct survey_info *survey)
4710 struct ieee80211_conf *conf = &hw->conf;
4715 survey->channel = conf->chandef.chan;
/*
 * Allocate an HLID (host link ID) for a new AP-mode station.  Fails
 * when the AP station limit is reached or no free link is available.
 * On success: restores the station's saved TX-freed counter (for
 * recovery/resume), marks the HLID in the vif's map, records the
 * station's MAC and bumps the active-station count.
 */
4720 static int wl1271_allocate_sta(struct wl1271 *wl,
4721 struct wl12xx_vif *wlvif,
4722 struct ieee80211_sta *sta)
4724 struct wl1271_station *wl_sta;
4728 if (wl->active_sta_count >= wl->max_ap_stations) {
4729 wl1271_warning("could not allocate HLID - too much stations");
4733 wl_sta = (struct wl1271_station *)sta->drv_priv;
4734 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4736 wl1271_warning("could not allocate HLID - too many links");
4740 /* use the previous security seq, if this is a recovery/resume */
4741 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4743 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4744 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4745 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the vif's map and
 * the AP power-save bookkeeping, save the last-used packet number for
 * recovery/suspend, free the link and decrement the station count.
 * No-op if the HLID is not marked in the map.
 */
4749 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4751 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4754 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4755 __clear_bit(hlid, &wl->ap_ps_map);
4756 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4759 * save the last used PN in the private part of iee80211_sta,
4760 * in case of recovery/suspend
4762 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4764 wl12xx_free_link(wl, wlvif, &hlid);
4765 wl->active_sta_count--;
4768 * rearm the tx watchdog when the last STA is freed - give the FW a
4769 * chance to return STA-buffered packets before complaining.
4771 if (wl->active_sta_count == 0)
4772 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate an HLID and send the add-peer
 * command to the FW.  If the FW command fails the HLID is freed so
 * the allocation does not leak.
 */
4775 static int wl12xx_sta_add(struct wl1271 *wl,
4776 struct wl12xx_vif *wlvif,
4777 struct ieee80211_sta *sta)
4779 struct wl1271_station *wl_sta;
4783 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4785 ret = wl1271_allocate_sta(wl, wlvif, sta);
4789 wl_sta = (struct wl1271_station *)sta->drv_priv;
4790 hlid = wl_sta->hlid;
4792 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation on FW failure */
4794 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station in AP mode: sanity-check that the HLID is actually
 * allocated, send the remove-peer command to the FW, then free the
 * HLID and associated bookkeeping.
 */
4799 static int wl12xx_sta_remove(struct wl1271 *wl,
4800 struct wl12xx_vif *wlvif,
4801 struct ieee80211_sta *sta)
4803 struct wl1271_station *wl_sta;
4806 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4808 wl_sta = (struct wl1271_station *)sta->drv_priv;
4810 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4813 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4817 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on the vif's own role, but only if no
 * other role is currently ROCing (only one ROC may be active at a
 * time across all roles).
 */
4821 static void wlcore_roc_if_possible(struct wl1271 *wl,
4822 struct wl12xx_vif *wlvif)
4824 if (find_first_bit(wl->roc_map,
4825 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4828 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4831 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4835 * when wl_sta is NULL, we treat this call as if coming from a
4836 * pending auth reply.
4837 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between add and authorize).
 * While any station is connecting (or an auth reply is pending) the
 * role keeps a ROC so the channel stays available; the ROC is dropped
 * once the last in-connection station completes (in_conn == false and
 * both counters/flags are clear).
 */
4840 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4841 struct wl1271_station *wl_sta, bool in_conn)
4844 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first connecting entity: grab the ROC before counting it */
4847 if (!wlvif->ap_pending_auth_reply &&
4848 !wlvif->inconn_count)
4849 wlcore_roc_if_possible(wl, wlvif);
4852 wl_sta->in_connection = true;
4853 wlvif->inconn_count++;
4855 wlvif->ap_pending_auth_reply = true;
4858 if (wl_sta && !wl_sta->in_connection)
4861 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4864 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4868 wl_sta->in_connection = false;
4869 wlvif->inconn_count--;
4871 wlvif->ap_pending_auth_reply = false;
/* last connecting entity gone: release the ROC if we hold one */
4874 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4875 test_bit(wlvif->role_id, wl->roc_map))
4876 wl12xx_croc(wl, wlvif->role_id);
/*
 * Apply a mac80211 station state transition (old_state -> new_state)
 * for either an AP-mode peer or our own AP in STA mode.  Handles:
 * add/remove/authorize (AP mode), authorize/de-authorize (STA mode),
 * saving/restoring the TX security sequence counter across
 * disassoc/assoc (suspend/resume), and ROC acquisition/release around
 * the connection window.  Called with wl->mutex held and FW awake
 * (via wl12xx_op_sta_state).
 * NOTE(review): several guard conditions are elided in this view.
 */
4880 static int wl12xx_update_sta_state(struct wl1271 *wl,
4881 struct wl12xx_vif *wlvif,
4882 struct ieee80211_sta *sta,
4883 enum ieee80211_sta_state old_state,
4884 enum ieee80211_sta_state new_state)
4886 struct wl1271_station *wl_sta;
4887 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4888 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4891 wl_sta = (struct wl1271_station *)sta->drv_priv;
4893 /* Add station (AP mode) */
4895 old_state == IEEE80211_STA_NOTEXIST &&
4896 new_state == IEEE80211_STA_NONE) {
4897 ret = wl12xx_sta_add(wl, wlvif, sta);
4901 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4904 /* Remove station (AP mode) */
4906 old_state == IEEE80211_STA_NONE &&
4907 new_state == IEEE80211_STA_NOTEXIST) {
4909 wl12xx_sta_remove(wl, wlvif, sta);
4911 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4914 /* Authorize station (AP mode) */
4916 new_state == IEEE80211_STA_AUTHORIZED) {
4917 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4921 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4926 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4929 /* Authorize station */
4931 new_state == IEEE80211_STA_AUTHORIZED) {
4932 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4933 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize (STA mode): drop the authorized/state-sent flags */
4939 old_state == IEEE80211_STA_AUTHORIZED &&
4940 new_state == IEEE80211_STA_ASSOC) {
4941 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4942 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4945 /* save seq number on disassoc (suspend) */
4947 old_state == IEEE80211_STA_ASSOC &&
4948 new_state == IEEE80211_STA_AUTH) {
4949 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
4950 wlvif->total_freed_pkts = 0;
4953 /* restore seq number on assoc (resume) */
4955 old_state == IEEE80211_STA_AUTH &&
4956 new_state == IEEE80211_STA_ASSOC) {
4957 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
4960 /* clear ROCs on failure or authorization */
4962 (new_state == IEEE80211_STA_AUTHORIZED ||
4963 new_state == IEEE80211_STA_NOTEXIST)) {
4964 if (test_bit(wlvif->role_id, wl->roc_map))
4965 wl12xx_croc(wl, wlvif->role_id);
/* new peer appearing while no ROC is active: start one on our role */
4969 old_state == IEEE80211_STA_NOTEXIST &&
4970 new_state == IEEE80211_STA_NONE) {
4971 if (find_first_bit(wl->roc_map,
4972 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4973 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4974 wl12xx_roc(wl, wlvif, wlvif->role_id,
4975 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback: take the mutex, wake the FW and
 * delegate to wl12xx_update_sta_state().  On a downward transition
 * mac80211 may not tolerate a failure, hence the new_state < old_state
 * check at the end before propagating the return code.
 */
4981 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4982 struct ieee80211_vif *vif,
4983 struct ieee80211_sta *sta,
4984 enum ieee80211_sta_state old_state,
4985 enum ieee80211_sta_state new_state)
4987 struct wl1271 *wl = hw->priv;
4988 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4991 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4992 sta->aid, old_state, new_state);
4994 mutex_lock(&wl->mutex);
4996 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5001 ret = wl1271_ps_elp_wakeup(wl);
5005 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5007 wl1271_ps_elp_sleep(wl);
5009 mutex_unlock(&wl->mutex);
5010 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback.  Only RX BA sessions are managed by
 * the host: RX_START programs a receiver BA session in the FW (bounded
 * by ba_rx_session_count_max and tracked per-link in ba_bitmap),
 * RX_STOP tears it down.  All TX AMPDU actions are handled by the FW
 * autonomously and deliberately fall through to a common path.
 * The link (hlid) is resolved from the vif for STA mode or from the
 * station's drv_priv for AP mode.
 */
5015 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5016 struct ieee80211_vif *vif,
5017 enum ieee80211_ampdu_mlme_action action,
5018 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5021 struct wl1271 *wl = hw->priv;
5022 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5024 u8 hlid, *ba_bitmap;
5026 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5029 /* sanity check - the fields in FW are only 8bits wide */
5030 if (WARN_ON(tid > 0xFF))
5033 mutex_lock(&wl->mutex);
5035 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5040 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5041 hlid = wlvif->sta.hlid;
5042 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5043 struct wl1271_station *wl_sta;
5045 wl_sta = (struct wl1271_station *)sta->drv_priv;
5046 hlid = wl_sta->hlid;
5052 ba_bitmap = &wl->links[hlid].ba_bitmap;
5054 ret = wl1271_ps_elp_wakeup(wl);
5058 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5062 case IEEE80211_AMPDU_RX_START:
5063 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5068 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5070 wl1271_error("exceeded max RX BA sessions");
5074 if (*ba_bitmap & BIT(tid)) {
5076 wl1271_error("cannot enable RX BA session on active "
5081 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5084 *ba_bitmap |= BIT(tid);
5085 wl->ba_rx_session_count++;
5089 case IEEE80211_AMPDU_RX_STOP:
5090 if (!(*ba_bitmap & BIT(tid))) {
5092 * this happens on reconfig - so only output a debug
5093 * message for now, and don't fail the function.
5095 wl1271_debug(DEBUG_MAC80211,
5096 "no active RX BA session on tid: %d",
5102 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5105 *ba_bitmap &= ~BIT(tid);
5106 wl->ba_rx_session_count--;
5111 * The BA initiator (TX) session is managed by the FW autonomously.
5112 * Falling through here on purpose for all TX AMPDU commands.
5114 case IEEE80211_AMPDU_TX_START:
5115 case IEEE80211_AMPDU_TX_STOP_CONT:
5116 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5117 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5118 case IEEE80211_AMPDU_TX_OPERATIONAL:
5123 wl1271_error("Incorrect ampdu action id=%x\n", action);
5127 wl1271_ps_elp_sleep(wl);
5130 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback: translate the per-band legacy
 * rate masks into the driver's rate-set encoding and cache them per
 * band.  For a not-yet-associated STA the new mask is applied to the
 * FW immediately (band rates reset + rate policies re-programmed);
 * otherwise only the cache is updated.
 */
5135 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5136 struct ieee80211_vif *vif,
5137 const struct cfg80211_bitrate_mask *mask)
5139 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5140 struct wl1271 *wl = hw->priv;
5143 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5144 mask->control[NL80211_BAND_2GHZ].legacy,
5145 mask->control[NL80211_BAND_5GHZ].legacy);
5147 mutex_lock(&wl->mutex);
5149 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5150 wlvif->bitrate_masks[i] =
5151 wl1271_tx_enabled_rates_get(wl,
5152 mask->control[i].legacy,
5155 if (unlikely(wl->state != WLCORE_STATE_ON))
5158 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5159 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5161 ret = wl1271_ps_elp_wakeup(wl);
5165 wl1271_set_band_rate(wl, wlvif);
5167 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5168 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5170 wl1271_ps_elp_sleep(wl);
5173 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback.  Flushes TX first, then issues
 * the HW channel-switch command for every STA vif.  If the device is
 * off, each STA vif is immediately reported as switch-failed.  On
 * success a delayed worker is armed to declare failure if the switch
 * has not completed ~5s after the expected switch time.
 */
5178 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5179 struct ieee80211_channel_switch *ch_switch)
5181 struct wl1271 *wl = hw->priv;
5182 struct wl12xx_vif *wlvif;
5185 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5187 wl1271_tx_flush(wl);
5189 mutex_lock(&wl->mutex);
5191 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
/* device gone: tell mac80211 each pending switch failed */
5192 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5193 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5194 ieee80211_chswitch_done(vif, false);
5197 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5201 ret = wl1271_ps_elp_wakeup(wl);
5205 /* TODO: change mac80211 to pass vif as param */
5206 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5207 unsigned long delay_usec;
5209 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5213 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5215 /* indicate failure 5 seconds after channel switch time */
5216 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5218 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5219 usecs_to_jiffies(delay_usec) +
5220 msecs_to_jiffies(5000));
5224 wl1271_ps_elp_sleep(wl);
5227 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback.  The queues/drop arguments are ignored;
 * the driver always performs a full TX flush.
 */
5230 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5231 u32 queues, bool drop)
5233 struct wl1271 *wl = hw->priv;
5235 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback.  Only one ROC may be active at
 * a time: returns -EBUSY-style failure if a ROC vif or any role ROC is
 * already set.  Otherwise wakes the FW, starts the device on the
 * requested channel and arms roc_complete_work to fire after
 * 'duration' ms.
 */
5238 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5239 struct ieee80211_vif *vif,
5240 struct ieee80211_channel *chan,
5242 enum ieee80211_roc_type type)
5244 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5245 struct wl1271 *wl = hw->priv;
5246 int channel, ret = 0;
5248 channel = ieee80211_frequency_to_channel(chan->center_freq);
5250 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5251 channel, wlvif->role_id);
5253 mutex_lock(&wl->mutex);
5255 if (unlikely(wl->state != WLCORE_STATE_ON))
5258 /* return EBUSY if we can't ROC right now */
5259 if (WARN_ON(wl->roc_vif ||
5260 find_first_bit(wl->roc_map,
5261 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5266 ret = wl1271_ps_elp_wakeup(wl);
5270 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* schedule automatic ROC completion after the requested duration */
5275 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5276 msecs_to_jiffies(duration));
5278 wl1271_ps_elp_sleep(wl);
5280 mutex_unlock(&wl->mutex);
/*
 * Complete the current ROC: stop the device role of the ROC vif.
 * Locked variant — caller holds wl->mutex with the FW awake.
 * Returns early if no ROC is in progress or the vif is no longer
 * initialized.
 */
5284 static int __wlcore_roc_completed(struct wl1271 *wl)
5286 struct wl12xx_vif *wlvif;
5289 /* already completed */
5290 if (unlikely(!wl->roc_vif))
5293 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5295 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5298 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * ROC completion entry point: takes wl->mutex, wakes the FW and calls
 * __wlcore_roc_completed().  Fails fast if the device is not ON.
 */
5307 static int wlcore_roc_completed(struct wl1271 *wl)
5311 wl1271_debug(DEBUG_MAC80211, "roc complete");
5313 mutex_lock(&wl->mutex);
5315 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5320 ret = wl1271_ps_elp_wakeup(wl);
5324 ret = __wlcore_roc_completed(wl);
5326 wl1271_ps_elp_sleep(wl);
5328 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler armed by remain_on_channel: ends the ROC and
 * notifies mac80211 that the remain-on-channel period has expired.
 */
5333 static void wlcore_roc_complete_work(struct work_struct *work)
5335 struct delayed_work *dwork;
5339 dwork = container_of(work, struct delayed_work, work);
5340 wl = container_of(dwork, struct wl1271, roc_complete_work);
5342 ret = wlcore_roc_completed(wl);
5344 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback: flush TX, cancel the
 * pending completion worker and complete the ROC synchronously.
 */
5347 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5349 struct wl1271 *wl = hw->priv;
5351 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5354 wl1271_tx_flush(wl);
5357 * we can't just flush_work here, because it might deadlock
5358 * (as we might get called from the same workqueue)
5360 cancel_delayed_work_sync(&wl->roc_complete_work);
5361 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update callback: forward the rate-control change
 * straight to the chip-specific hook.
 */
5366 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5367 struct ieee80211_vif *vif,
5368 struct ieee80211_sta *sta,
5371 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5372 struct wl1271 *wl = hw->priv;
5374 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 .get_rssi callback: wake the FW and read the averaged RSSI
 * (in dBm) for the vif via ACX.  Protected by wl->mutex.
 */
5377 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5378 struct ieee80211_vif *vif,
5379 struct ieee80211_sta *sta,
5382 struct wl1271 *wl = hw->priv;
5383 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5386 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5388 mutex_lock(&wl->mutex);
5390 if (unlikely(wl->state != WLCORE_STATE_ON))
5393 ret = wl1271_ps_elp_wakeup(wl);
5397 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5402 wl1271_ps_elp_sleep(wl);
5405 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx_frames_pending callback: true if frames are queued in
 * the host TX queues or still held by the FW.
 */
5410 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5412 struct wl1271 *wl = hw->priv;
5415 mutex_lock(&wl->mutex);
5417 if (unlikely(wl->state != WLCORE_STATE_ON))
5420 /* packets are considered pending if in the TX queue or the FW */
5421 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5423 mutex_unlock(&wl->mutex);
5428 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz legacy rate table (11b CCK rates first, with short-preamble
 * support from 2 Mbps up, then the OFDM rates).  hw_value maps each
 * rate to the FW's CONF_HW_BIT_RATE_* bit.
 */
5429 static struct ieee80211_rate wl1271_rates[] = {
5431 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5432 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5434 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5435 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5436 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5438 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5439 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5440 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5442 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5443 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5444 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5446 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5447 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5449 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5450 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5452 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5453 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5455 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5456 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5458 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5459 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5461 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5462 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5464 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5465 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5467 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5468 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5471 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14 (2412-2484 MHz) */
5472 static struct ieee80211_channel wl1271_channels[] = {
5473 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5474 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5475 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5476 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5477 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5478 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5479 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5480 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5481 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5482 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5483 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5484 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5485 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5486 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5489 /* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor registered with mac80211 */
5490 static struct ieee80211_supported_band wl1271_band_2ghz = {
5491 .channels = wl1271_channels,
5492 .n_channels = ARRAY_SIZE(wl1271_channels),
5493 .bitrates = wl1271_rates,
5494 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5497 /* 5 GHz data rates for WL1273 */
/* OFDM rates only — no CCK/short-preamble flags on 5 GHz */
5498 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5500 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5501 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5503 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5504 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5506 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5507 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5509 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5510 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5512 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5513 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5515 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5516 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5518 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5519 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5521 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5522 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5525 /* 5 GHz band channels for WL1273 */
5526 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5527 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5528 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5529 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5530 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5531 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5532 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5533 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5534 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5535 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5536 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5537 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5538 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5539 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5540 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5541 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5542 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5543 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5544 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5545 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5546 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5547 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5548 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5549 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5550 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5551 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5552 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5553 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5554 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5555 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5556 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5557 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor registered with mac80211 */
5560 static struct ieee80211_supported_band wl1271_band_5ghz = {
5561 .channels = wl1271_channels_5ghz,
5562 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5563 .bitrates = wl1271_rates_5ghz,
5564 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 driver-ops table registered by wlcore at ieee80211_register_hw */
5567 static const struct ieee80211_ops wl1271_ops = {
5568 .start = wl1271_op_start,
5569 .stop = wlcore_op_stop,
5570 .add_interface = wl1271_op_add_interface,
5571 .remove_interface = wl1271_op_remove_interface,
5572 .change_interface = wl12xx_op_change_interface,
5574 .suspend = wl1271_op_suspend,
5575 .resume = wl1271_op_resume,
5577 .config = wl1271_op_config,
5578 .prepare_multicast = wl1271_op_prepare_multicast,
5579 .configure_filter = wl1271_op_configure_filter,
5581 .set_key = wlcore_op_set_key,
5582 .hw_scan = wl1271_op_hw_scan,
5583 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5584 .sched_scan_start = wl1271_op_sched_scan_start,
5585 .sched_scan_stop = wl1271_op_sched_scan_stop,
5586 .bss_info_changed = wl1271_op_bss_info_changed,
5587 .set_frag_threshold = wl1271_op_set_frag_threshold,
5588 .set_rts_threshold = wl1271_op_set_rts_threshold,
5589 .conf_tx = wl1271_op_conf_tx,
5590 .get_tsf = wl1271_op_get_tsf,
5591 .get_survey = wl1271_op_get_survey,
5592 .sta_state = wl12xx_op_sta_state,
5593 .ampdu_action = wl1271_op_ampdu_action,
5594 .tx_frames_pending = wl1271_tx_frames_pending,
5595 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5596 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5597 .channel_switch = wl12xx_op_channel_switch,
5598 .flush = wlcore_op_flush,
5599 .remain_on_channel = wlcore_op_remain_on_channel,
5600 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5601 .add_chanctx = wlcore_op_add_chanctx,
5602 .remove_chanctx = wlcore_op_remove_chanctx,
5603 .change_chanctx = wlcore_op_change_chanctx,
5604 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5605 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5606 .sta_rc_update = wlcore_op_sta_rc_update,
5607 .get_rssi = wlcore_op_get_rssi,
5608 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a raw HW rate value reported by the firmware into the
 * mac80211 rate index for @band, using wl->band_rate_to_idx.
 * Out-of-range or unsupported HW rates are logged as errors.
 */
5612 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
/* reject rates beyond the per-chip HW rate table */
5618 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5619 wl1271_error("Illegal RX rate from HW: %d", rate);
5623 idx = wl->band_rate_to_idx[band][rate];
/* in-range but marked unsupported for this band */
5624 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5625 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Build the wiphy address list from a base MAC split into its OUI
 * (upper 3 bytes) and NIC (lower 3 bytes) parts: consecutive addresses
 * share the OUI and increment the NIC part (increment happens outside
 * the visible lines of this view).
 */
5632 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5636 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* incrementing the NIC part must not overflow 24 bits */
5639 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5640 wl1271_warning("NIC part of the MAC address wraps around!");
5642 for (i = 0; i < wl->num_mac_addr; i++) {
/* addr[0..2] = OUI, addr[3..5] = NIC, both big-endian byte order */
5643 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5644 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5645 wl->addresses[i].addr[2] = (u8) oui;
5646 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5647 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5648 wl->addresses[i].addr[5] = (u8) nic;
5652 /* we may be one address short at the most */
5653 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5656 * turn on the LAA bit in the first address and use it as
/* synthesize the missing last address as a locally-administered copy of [0] */
5659 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5660 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5661 memcpy(&wl->addresses[idx], &wl->addresses[0],
5662 sizeof(wl->addresses[0]));
/* BIT(1) in the first octet = locally administered address */
5664 wl->addresses[idx].addr[0] |= BIT(1);
5667 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5668 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on briefly to read identification data: chip ID,
 * PG (production/hardware) version and, if the chip-family ops
 * provide it, the fuse-programmed MAC address. Powers off afterwards.
 * Error paths are elided from this view.
 */
5671 static int wl12xx_get_hw_info(struct wl1271 *wl)
5675 ret = wl12xx_set_power_on(wl);
5679 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* defaults in case get_mac is absent or does not fill them in */
5683 wl->fuse_oui_addr = 0;
5684 wl->fuse_nic_addr = 0;
5686 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional in the per-chip ops table */
5690 if (wl->ops->get_mac)
5691 ret = wl->ops->get_mac(wl);
5694 wl1271_power_off(wl);
/*
 * Register the device with mac80211. Determines the base MAC address:
 * prefer the one embedded in the NVS file; if that is all-zero, fall
 * back to the fuse-programmed address read in wl12xx_get_hw_info().
 * Idempotent: returns early if already registered (early-return line
 * elided from this view).
 */
5698 static int wl1271_register_hw(struct wl1271 *wl)
5701 u32 oui_addr = 0, nic_addr = 0;
5703 if (wl->mac80211_registered)
5706 if (wl->nvs_len >= 12) {
5707 /* NOTE: The wl->nvs->nvs element must be first, in
5708 * order to simplify the casting, we assume it is at
5709 * the beginning of the wl->nvs structure.
5711 u8 *nvs_ptr = (u8 *)wl->nvs;
/* NVS stores the MAC in a scattered byte layout: OUI from bytes 11,10,6 */
5714 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
/* ... and the NIC part from bytes 5,4,3 */
5716 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5719 /* if the MAC address is zeroed in the NVS derive from fuse */
5720 if (oui_addr == 0 && nic_addr == 0) {
5721 oui_addr = wl->fuse_oui_addr;
5722 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5723 nic_addr = wl->fuse_nic_addr + 1;
5726 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5728 ret = ieee80211_register_hw(wl->hw);
5730 wl1271_error("unable to register mac80211 hw: %d", ret);
5734 wl->mac80211_registered = true;
5736 wl1271_debugfs_init(wl);
5738 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop any PLT (production-line test) mode
 * and unregister from mac80211. A PLT-state check guarding the
 * wl1271_plt_stop() call is elided from this view.
 */
5744 static void wl1271_unregister_hw(struct wl1271 *wl)
5747 wl1271_plt_stop(wl);
5749 ieee80211_unregister_hw(wl->hw);
5750 wl->mac80211_registered = false;
/*
 * Populate the ieee80211_hw / wiphy structures with everything mac80211
 * needs to know about this device: HW capability flags, cipher suites,
 * supported interface types, scan limits, band data (device-local
 * copies), queue layout and probe-response offload capabilities.
 * Called from wlcore_nvs_cb() before wl1271_register_hw().
 */
5754 static int wl1271_init_ieee80211(struct wl1271 *wl)
/* GEM is a TI-proprietary cipher on top of the standard WEP/TKIP/CCMP set */
5757 static const u32 cipher_suites[] = {
5758 WLAN_CIPHER_SUITE_WEP40,
5759 WLAN_CIPHER_SUITE_WEP104,
5760 WLAN_CIPHER_SUITE_TKIP,
5761 WLAN_CIPHER_SUITE_CCMP,
5762 WL1271_CIPHER_SUITE_GEM,
5765 /* The tx descriptor buffer */
5766 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* TKIP quirk: firmware needs extra headroom for the TKIP header */
5768 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5769 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5772 /* FIXME: find a proper value */
5773 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5775 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5776 IEEE80211_HW_SUPPORTS_PS |
5777 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5778 IEEE80211_HW_SUPPORTS_UAPSD |
5779 IEEE80211_HW_HAS_RATE_CONTROL |
5780 IEEE80211_HW_CONNECTION_MONITOR |
5781 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5782 IEEE80211_HW_SPECTRUM_MGMT |
5783 IEEE80211_HW_AP_LINK_PS |
5784 IEEE80211_HW_AMPDU_AGGREGATION |
5785 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5786 IEEE80211_HW_QUEUE_CONTROL |
5787 IEEE80211_HW_CHANCTX_STA_CSA;
5789 wl->hw->wiphy->cipher_suites = cipher_suites;
5790 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5792 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5793 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5794 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
/* HW scan handles a single SSID; sched scan supports up to 16 */
5795 wl->hw->wiphy->max_scan_ssids = 1;
5796 wl->hw->wiphy->max_sched_scan_ssids = 16;
5797 wl->hw->wiphy->max_match_sets = 16;
5799 * Maximum length of elements in scanning probe request templates
5800 * should be the maximum length possible for a template, without
5801 * the IEEE80211 header of the template
5803 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5804 sizeof(struct ieee80211_header);
5806 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5807 sizeof(struct ieee80211_header);
/* remain-on-channel cap in ms */
5809 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
5811 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5812 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5813 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5815 /* make sure all our channels fit in the scanned_ch bitmask */
5816 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5817 ARRAY_SIZE(wl1271_channels_5ghz) >
5818 WL1271_MAX_CHANNELS);
5820 * clear channel flags from the previous usage
5821 * and restore max_power & max_antenna_gain values.
5823 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5824 wl1271_band_2ghz.channels[i].flags = 0;
5825 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5826 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5829 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5830 wl1271_band_5ghz.channels[i].flags = 0;
5831 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5832 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5836 * We keep local copies of the band structs because we need to
5837 * modify them on a per-device basis.
5839 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5840 sizeof(wl1271_band_2ghz));
/* overlay the per-chip HT capabilities onto the copied band structs */
5841 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5842 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5843 sizeof(*wl->ht_cap));
5844 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5845 sizeof(wl1271_band_5ghz));
5846 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5847 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5848 sizeof(*wl->ht_cap));
5850 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5851 &wl->bands[IEEE80211_BAND_2GHZ];
5852 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5853 &wl->bands[IEEE80211_BAND_5GHZ];
5856 * allow 4 queues per mac address we support +
5857 * 1 cab queue per mac + one global offchannel Tx queue
5859 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5861 /* the last queue is the offchannel queue */
5862 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5863 wl->hw->max_rates = 1;
5865 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5867 /* the FW answers probe-requests in AP-mode */
5868 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5869 wl->hw->wiphy->probe_resp_offload =
5870 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5871 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5872 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5874 /* allowed interface combinations */
5875 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5876 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5878 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* per-station / per-vif private data sizes for mac80211 allocations */
5880 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5881 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5883 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and minimally initialize a wl1271 device instance together
 * with its ieee80211_hw. Sets up queues, work items, locks, the
 * freezable workqueue and the various DMA/log buffers. On failure the
 * goto-unwind chain at the bottom releases whatever was acquired and
 * an ERR_PTR is returned. Counterpart of wlcore_free_hw().
 * Several error-check and label lines are elided from this view.
 */
5888 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5891 struct ieee80211_hw *hw;
5896 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5898 wl1271_error("could not alloc ieee80211_hw");
5904 memset(wl, 0, sizeof(*wl));
/* chip-family private area, sized by the caller */
5906 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5908 wl1271_error("could not alloc wl priv");
5910 goto err_priv_alloc;
5913 INIT_LIST_HEAD(&wl->wlvif_list);
5918 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5919 * we don't allocate any additional resource here, so that's fine.
5921 for (i = 0; i < NUM_TX_QUEUES; i++)
5922 for (j = 0; j < WLCORE_MAX_LINKS; j++)
5923 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5925 skb_queue_head_init(&wl->deferred_rx_queue);
5926 skb_queue_head_init(&wl->deferred_tx_queue);
/* deferred work: ELP power save, netstack, tx, recovery, scan/roc/watchdog */
5928 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5929 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5930 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5931 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5932 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5933 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5934 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so that pending work does not race system suspend */
5936 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5937 if (!wl->freezable_wq) {
/* runtime-state defaults */
5944 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5945 wl->band = IEEE80211_BAND_2GHZ;
5946 wl->channel_type = NL80211_CHAN_NO_HT;
5948 wl->sg_enabled = true;
5949 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5950 wl->recovery_count = 0;
5953 wl->ap_fw_ps_map = 0;
5955 wl->platform_quirks = 0;
5956 wl->system_hlid = WL12XX_SYSTEM_HLID;
5957 wl->active_sta_count = 0;
5958 wl->active_link_count = 0;
/* readers of the FW log block on this; woken in wlcore_free_hw() */
5960 init_waitqueue_head(&wl->fwlog_waitq);
5962 /* The system link is always allocated */
5963 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5965 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5966 for (i = 0; i < wl->num_tx_desc; i++)
5967 wl->tx_frames[i] = NULL;
5969 spin_lock_init(&wl->wl_lock);
5971 wl->state = WLCORE_STATE_OFF;
5972 wl->fw_type = WL12XX_FW_TYPE_NONE;
5973 mutex_init(&wl->mutex);
5974 mutex_init(&wl->flush_mutex);
/* completed by wlcore_nvs_cb(); wlcore_remove() waits on it */
5975 init_completion(&wl->nvs_loading_complete);
/* page-aligned aggregation buffer for block transfers */
5977 order = get_order(aggr_buf_size);
5978 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5979 if (!wl->aggr_buf) {
5983 wl->aggr_buf_size = aggr_buf_size;
5985 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5986 if (!wl->dummy_packet) {
5991 /* Allocate one page for the FW log */
5992 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5995 goto err_dummy_packet;
/* event mailbox; GFP_DMA since it is a bus-transfer target */
5998 wl->mbox_size = mbox_size;
5999 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6005 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6006 if (!wl->buffer_32) {
/* error unwind: release resources in reverse order of acquisition */
6017 free_page((unsigned long)wl->fwlog);
6020 dev_kfree_skb(wl->dummy_packet);
6023 free_pages((unsigned long)wl->aggr_buf, order);
6026 destroy_workqueue(wl->freezable_wq);
6029 wl1271_debugfs_exit(wl);
6033 ieee80211_free_hw(hw);
6037 return ERR_PTR(ret);
6039 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Tear down everything wlcore_alloc_hw() created: unblock FW-log
 * readers, drop sysfs entries, free the log/aggregation/mailbox
 * buffers, status structures and workqueue, and release the
 * ieee80211_hw. Some free calls (mbox, nvs) are elided from this view.
 */
6041 int wlcore_free_hw(struct wl1271 *wl)
6043 /* Unblock any fwlog readers */
6044 mutex_lock(&wl->mutex);
/* fwlog_size = -1 is the sentinel readers interpret as "going away" */
6045 wl->fwlog_size = -1;
6046 wake_up_interruptible_all(&wl->fwlog_waitq);
6047 mutex_unlock(&wl->mutex);
6049 wlcore_sysfs_free(wl);
6051 kfree(wl->buffer_32);
6053 free_page((unsigned long)wl->fwlog);
6054 dev_kfree_skb(wl->dummy_packet);
6055 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6057 wl1271_debugfs_exit(wl);
6061 wl->fw_type = WL12XX_FW_TYPE_NONE;
6065 kfree(wl->raw_fw_status);
6066 kfree(wl->fw_status);
6067 kfree(wl->tx_res_if);
6068 destroy_workqueue(wl->freezable_wq);
6071 ieee80211_free_hw(wl->hw);
6075 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211; installed in
 * wlcore_nvs_cb() only when the platform keeps power in suspend
 * (pdata->pwr_in_suspend).
 */
6078 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6079 .flags = WIPHY_WOWLAN_ANY,
6080 .n_patterns = WL1271_MAX_RX_FILTERS,
6081 .pattern_min_len = 1,
6082 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard-IRQ trampoline used only with the edge-triggered IRQ quirk:
 * immediately defers all processing to the threaded handler (wlcore_irq).
 */
6086 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6088 return IRQ_WAKE_THREAD;
/*
 * Async completion callback for the NVS firmware request issued in
 * wlcore_probe(). Performs the bulk of device bring-up: copies the NVS
 * image, runs chip-family setup, installs the IRQ handler, reads HW
 * info, configures and registers with mac80211 and sysfs. Always
 * releases @fw and completes wl->nvs_loading_complete, even on failure
 * (intermediate error-handling lines are elided from this view).
 */
6091 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6093 struct wl1271 *wl = context;
6094 struct platform_device *pdev = wl->pdev;
6095 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6096 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6097 unsigned long irqflags;
6099 irq_handler_t hardirq_fn = NULL;
/* keep a private copy of the NVS; missing NVS is non-fatal (debug only) */
6102 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6104 wl1271_error("Could not allocate nvs data");
6107 wl->nvs_len = fw->size;
6109 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6115 ret = wl->ops->setup(wl);
6119 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6121 /* adjust some runtime configuration parameters */
6122 wlcore_adjust_conf(wl);
6124 wl->irq = platform_get_irq(pdev, 0);
6125 wl->platform_quirks = pdata->platform_quirks;
6126 wl->if_ops = pdev_data->if_ops;
/* edge-IRQ quirk needs a hardirq trampoline; level IRQs can be oneshot */
6128 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6129 irqflags = IRQF_TRIGGER_RISING;
6130 hardirq_fn = wlcore_hardirq;
6132 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6135 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6136 irqflags, pdev->name, wl);
6138 wl1271_error("request_irq() failed: %d", ret);
/* mark the IRQ as a wakeup source; WoWLAN needs power kept in suspend */
6143 ret = enable_irq_wake(wl->irq);
6145 wl->irq_wake_enabled = true;
6146 device_init_wakeup(wl->dev, 1);
6147 if (pdata->pwr_in_suspend)
6148 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* keep the IRQ masked until the interface is started */
6151 disable_irq(wl->irq);
6153 ret = wl12xx_get_hw_info(wl);
6155 wl1271_error("couldn't get hw info");
6159 ret = wl->ops->identify_chip(wl);
6163 ret = wl1271_init_ieee80211(wl);
6167 ret = wl1271_register_hw(wl);
6171 ret = wlcore_sysfs_init(wl);
/* checked by wlcore_remove() to decide whether teardown is needed */
6175 wl->initialized = true;
/* error unwind labels (partially elided) */
6179 wl1271_unregister_hw(wl);
6182 free_irq(wl->irq, wl);
6188 release_firmware(fw);
6189 complete_all(&wl->nvs_loading_complete);
/*
 * Common probe entry for the chip-family drivers. Validates that the
 * family filled in ops/ptable, then kicks off an asynchronous request
 * for the NVS calibration file; the rest of bring-up happens in
 * wlcore_nvs_cb() when the firmware loader answers.
 */
6192 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6196 if (!wl->ops || !wl->ptable)
6199 wl->dev = &pdev->dev;
6201 platform_set_drvdata(pdev, wl);
6203 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6204 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6207 wl1271_error("request_firmware_nowait failed: %d", ret);
/* the callback will never run, so release any waiters ourselves */
6208 complete_all(&wl->nvs_loading_complete);
6213 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Platform-device removal. Waits for the async NVS/bring-up path
 * (wlcore_nvs_cb) to finish first so teardown cannot race it; if
 * bring-up never completed (!wl->initialized) there is nothing to undo.
 */
6215 int wlcore_remove(struct platform_device *pdev)
6217 struct wl1271 *wl = platform_get_drvdata(pdev);
6219 wait_for_completion(&wl->nvs_loading_complete);
6220 if (!wl->initialized)
/* undo the wakeup-source setup done in wlcore_nvs_cb() */
6223 if (wl->irq_wake_enabled) {
6224 device_init_wakeup(wl->dev, 0);
6225 disable_irq_wake(wl->irq);
6227 wl1271_unregister_hw(wl);
6228 free_irq(wl->irq, wl);
6233 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Debug bitmask; exported so the chip-family modules share one level. */
6235 u32 wl12xx_debug_level = DEBUG_NONE;
6236 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6237 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6238 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; parsed elsewhere (perms 0: load-time only) */
6240 module_param_named(fwlog, fwlog_param, charp, 0);
6241 MODULE_PARM_DESC(fwlog,
6242 "FW logger options: continuous, ondemand, dbgpins or disable");
/* -1 defaults below mean "use the driver's built-in value" */
6244 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6245 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6247 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6248 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6250 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6251 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6253 MODULE_LICENSE("GPL");
6254 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6255 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6256 MODULE_FIRMWARE(WL12XX_NVS_NAME);