3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
/* Number of attempts at booting the firmware before giving up. */
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional overrides for wl->conf defaults, applied in wlcore_adjust_conf().
 * -1 / NULL means "not set; keep the per-chip default".
 * NOTE(review): presumably exposed as module parameters, but the
 * module_param() declarations are not visible in this excerpt - confirm.
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/* Forward declarations for static helpers defined later in this file. */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that the associated STA peer has been authorized
 * (CONNECTED peer state).  Only valid on STA interfaces; the
 * WLVIF_FLAG_STA_STATE_SENT test_and_set guards against issuing the
 * peer-state command more than once per association.
 * NOTE(review): non-contiguous source line numbers - local declarations,
 * early-return bodies and the closing brace are elided in this excerpt.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change notifier: walk the 5 GHz band and mark any
 * enabled radar channel as NO_IR (no transmissions before radar
 * detection), then push the updated regulatory domain to the FW via
 * wlcore_regdomain_config().
 * NOTE(review): numbering gaps show some lines (braces, a "continue"
 * for disabled channels) are elided in this excerpt.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_supported_band *band;
83 struct ieee80211_channel *ch;
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv;
88 band = wiphy->bands[IEEE80211_BAND_5GHZ];
89 for (i = 0; i < band->n_channels; i++) {
90 ch = &band->channels[i];
91 if (ch->flags & IEEE80211_CHAN_DISABLED)
94 if (ch->flags & IEEE80211_CHAN_RADAR)
95 ch->flags |= IEEE80211_CHAN_NO_IR;
99 wlcore_regdomain_config(wl);
/*
 * Enable or disable PS RX streaming in the FW for this vif and mirror
 * the result in the WLVIF_FLAG_RX_STREAMING_STARTED flag bit.
 * NOTE(review): elided lines (numbering gaps) likely hold the error
 * check on the ACX call and the return - confirm against full source.
 */
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
107 /* we should hold wl->mutex */
108 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
121 * this function is being called when the rx_streaming interval
122 * has been changed or rx_streaming should be disabled
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
127 int period = wl->conf.rx_streaming.interval;
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
133 /* reconfigure/disable according to new streaming_period */
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 (wl->conf.rx_streaming.always ||
137 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 ret = wl1271_set_rx_streaming(wl, wlvif, true);
140 ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* stop the inactivity timer; rx streaming was just reconfigured */
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn on RX streaming for an associated STA vif (unless
 * already started, not associated, or neither "always" nor Soft Gemini
 * requires it), then arm rx_streaming_timer to stop it again after
 * conf.rx_streaming.duration ms of inactivity.  Runs under wl->mutex
 * with an ELP wakeup around the FW access.
 * NOTE(review): goto-out error paths are elided here (numbering gaps).
 */
148 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
151 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
152 rx_streaming_enable_work);
153 struct wl1271 *wl = wlvif->wl;
155 mutex_lock(&wl->mutex);
157 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
158 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
159 (!wl->conf.rx_streaming.always &&
160 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
163 if (!wl->conf.rx_streaming.interval)
166 ret = wl1271_ps_elp_wakeup(wl);
170 ret = wl1271_set_rx_streaming(wl, wlvif, true);
174 /* stop it after some time of inactivity */
175 mod_timer(&wlvif->rx_streaming_timer,
176 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
179 wl1271_ps_elp_sleep(wl);
181 mutex_unlock(&wl->mutex);
/*
 * Deferred work (queued by rx_streaming_timer): turn RX streaming back
 * off if it is currently started.  Runs under wl->mutex with an ELP
 * wakeup around the FW access.
 * NOTE(review): goto-out error paths are elided here (numbering gaps).
 */
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
187 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 rx_streaming_disable_work);
189 struct wl1271 *wl = wlvif->wl;
191 mutex_lock(&wl->mutex);
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
196 ret = wl1271_ps_elp_wakeup(wl);
200 ret = wl1271_set_rx_streaming(wl, wlvif, false);
205 wl1271_ps_elp_sleep(wl);
207 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: can't touch the FW from timer context, so
 * queue the disable work on the mac80211 workqueue instead.
 */
210 static void wl1271_rx_streaming_timer(unsigned long data)
212 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 struct wl1271 *wl = wlvif->wl;
214 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
217 /* wl->mutex must be taken */
/*
 * (Re)arm the TX watchdog: cancel any pending instance and queue it
 * again tx_watchdog_timeout ms from now.  A no-op when no TX blocks
 * are allocated, i.e. the watchdog has nothing to watch.
 */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
220 /* if the watchdog is not armed, don't do anything */
221 if (wl->tx_allocated_blocks == 0)
224 cancel_delayed_work(&wl->tx_watchdog_work);
225 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires when no TX completion was seen from the FW for
 * tx_watchdog_timeout ms while blocks are still allocated.  Benign
 * causes (ROC in progress, scan in progress, AP buffering frames for
 * sleeping stations) just rearm the watchdog; otherwise TX is assumed
 * stuck in the FW and recovery is triggered.
 * NOTE(review): the goto-out / early-return statements after each
 * rearm are elided in this excerpt (numbering gaps).
 */
229 static void wl12xx_tx_watchdog_work(struct work_struct *work)
231 struct delayed_work *dwork;
234 dwork = container_of(work, struct delayed_work, work);
235 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
237 mutex_lock(&wl->mutex);
239 if (unlikely(wl->state != WLCORE_STATE_ON))
242 /* Tx went out in the meantime - everything is ok */
243 if (unlikely(wl->tx_allocated_blocks == 0))
247 * if a ROC is in progress, we might not have any Tx for a long
248 * time (e.g. pending Tx on the non-ROC channels)
250 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
251 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
252 wl->conf.tx.tx_watchdog_timeout);
253 wl12xx_rearm_tx_watchdog_locked(wl);
258 * if a scan is in progress, we might not have any Tx for a long
261 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
262 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
263 wl->conf.tx.tx_watchdog_timeout);
264 wl12xx_rearm_tx_watchdog_locked(wl);
269 * AP might cache a frame for a long time for a sleeping station,
270 * so rearm the timer if there's an AP interface with stations. If
271 * Tx is genuinely stuck we will most hopefully discover it when all
272 * stations are removed due to inactivity.
274 if (wl->active_sta_count) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
277 wl->conf.tx.tx_watchdog_timeout,
278 wl->active_sta_count);
279 wl12xx_rearm_tx_watchdog_locked(wl);
283 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_queue_recovery_work(wl);
288 mutex_unlock(&wl->mutex);
/*
 * Fold the optional load-time parameters (fwlog_mem_blocks, fwlog_param,
 * bug_on_recovery, no_recovery) into wl->conf, validating ranges and
 * rejecting unknown fwlog_param strings with an error message.
 * NOTE(review): the "if (fwlog_param)" guard around the strcmp chain is
 * elided in this excerpt (numbering gap before line 308) - confirm.
 */
291 static void wlcore_adjust_conf(struct wl1271 *wl)
293 /* Adjust settings according to optional module parameters */
295 /* Firmware Logger params */
296 if (fwlog_mem_blocks != -1) {
297 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
302 "Illegal fwlog_mem_blocks=%d using default %d",
303 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
308 if (!strcmp(fwlog_param, "continuous")) {
309 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 } else if (!strcmp(fwlog_param, "ondemand")) {
311 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 } else if (!strcmp(fwlog_param, "dbgpins")) {
313 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 } else if (!strcmp(fwlog_param, "disable")) {
316 wl->conf.fwlog.mem_blocks = 0;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
323 if (bug_on_recovery != -1)
324 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
326 if (no_recovery != -1)
327 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation for AP mode: end host-level PS for a link when
 * the STA is awake or has few pending FW packets; start host-level PS
 * when the STA sleeps with many pending packets, unless it is the only
 * connected link (see the in-body comment on link accounting).
 */
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 struct wl12xx_vif *wlvif,
336 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
342 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_end(wl, wlvif, hlid);
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 2*ap_count + 1 active links,
350 * since we must account for the global and broadcast AP links
351 * for each AP. The "fw_ps" check assures us the other link is a STA
352 * connected to the AP. Otherwise the FW would not set the PSM bit.
354 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW per-link PS bitmap from the FW status and run
 * PS regulation over every station link of this AP vif.
 */
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 struct wl12xx_vif *wlvif,
361 struct wl_fw_status *status)
363 unsigned long cur_fw_ps_map;
366 cur_fw_ps_map = status->link_ps_bitmap;
367 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 wl1271_debug(DEBUG_PSM,
369 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
370 wl->ap_fw_ps_map, cur_fw_ps_map,
371 wl->ap_fw_ps_map ^ cur_fw_ps_map);
373 wl->ap_fw_ps_map = cur_fw_ps_map;
376 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
377 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status block from the chip and update host accounting:
 * per-queue and per-link freed-packet counters (with 8-bit wrap-around
 * handling), total freed TX blocks (with 32-bit wrap-around handling),
 * TX watchdog rearm/cancel, available-block watermark, AP link PS
 * state, and the host/FW time offset.
 * NOTE(review): numbering gaps - error-return checks after the raw
 * read, getnstimeofday()/ts setup and the final return are elided here.
 */
381 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
383 struct wl12xx_vif *wlvif;
385 u32 old_tx_blk_count = wl->tx_blocks_available;
386 int avail, freed_blocks;
389 struct wl1271_link *lnk;
391 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
393 wl->fw_status_len, false);
397 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
399 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400 "drv_rx_counter = %d, tx_results_counter = %d)",
402 status->fw_rx_counter,
403 status->drv_rx_counter,
404 status->tx_results_counter);
406 for (i = 0; i < NUM_TX_QUEUES; i++) {
407 /* prevent wrap-around in freed-packets counter */
408 wl->tx_allocated_pkts[i] -=
409 (status->counters.tx_released_pkts[i] -
410 wl->tx_pkts_freed[i]) & 0xff;
412 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
416 for_each_set_bit(i, wl->links_map, wl->num_links) {
420 /* prevent wrap-around in freed-packets counter */
421 diff = (status->counters.tx_lnk_free_pkts[i] -
422 lnk->prev_freed_pkts) & 0xff;
427 lnk->allocated_pkts -= diff;
428 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
430 /* accumulate the prev_freed_pkts counter */
431 lnk->total_freed_pkts += diff;
434 /* prevent wrap-around in total blocks counter */
435 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
436 freed_blocks = status->total_released_blks -
439 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
440 status->total_released_blks;
442 wl->tx_blocks_freed = status->total_released_blks;
444 wl->tx_allocated_blocks -= freed_blocks;
447 * If the FW freed some blocks:
448 * If we still have allocated blocks - re-arm the timer, Tx is
449 * not stuck. Otherwise, cancel the timer (no Tx currently).
452 if (wl->tx_allocated_blocks)
453 wl12xx_rearm_tx_watchdog_locked(wl);
455 cancel_delayed_work(&wl->tx_watchdog_work);
458 avail = status->tx_total - wl->tx_allocated_blocks;
461 * The FW might change the total number of TX memblocks before
462 * we get a notification about blocks being released. Thus, the
463 * available blocks calculation might yield a temporary result
464 * which is lower than the actual available blocks. Keeping in
465 * mind that only blocks that were allocated can be moved from
466 * TX to RX, tx_blocks_available should never decrease here.
468 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
471 /* if more blocks are available now, tx work can be scheduled */
472 if (wl->tx_blocks_available > old_tx_blk_count)
473 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
475 /* for AP update num of allocated TX blocks per link and ps status */
476 wl12xx_for_each_wlvif_ap(wl, wlvif) {
477 wl12xx_irq_update_links_status(wl, wlvif, status);
480 /* update the host-chipset time offset */
482 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
483 (s64)(status->fw_localtime);
485 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred RX and TX skb queues into the network stack
 * (process context "_ni" variants of the mac80211 hand-off calls).
 */
490 static void wl1271_flush_deferred_work(struct wl1271 *wl)
494 /* Pass all received frames to the network stack */
495 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
496 ieee80211_rx_ni(wl->hw, skb);
498 /* Return sent skbs to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
500 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Worker that repeatedly flushes the deferred queues until the RX
 * deferred queue stays empty (new frames may arrive while flushing).
 */
503 static void wl1271_netstack_work(struct work_struct *work)
506 container_of(work, struct wl1271, netstack_work);
509 wl1271_flush_deferred_work(wl);
510 } while (skb_queue_len(&wl->deferred_rx_queue));
513 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt processing loop, called with wl->mutex held: read the
 * FW status, then dispatch on the pending interrupt bits - watchdog
 * (HW/SW) triggers recovery, DATA drives RX, inline TX and delayed TX
 * completion, EVENT_A/EVENT_B dispatch FW event mailboxes.  Iterates up
 * to WL1271_IRQ_MAX_LOOPS times (only once with edge-triggered IRQs, to
 * avoid racing the hardirq).
 * NOTE(review): numbering gaps - "done" handling, error-exit paths and
 * the final return are elided from this excerpt.
 */
515 static int wlcore_irq_locked(struct wl1271 *wl)
519 int loopcount = WL1271_IRQ_MAX_LOOPS;
521 unsigned int defer_count;
525 * In case edge triggered interrupt must be used, we cannot iterate
526 * more than once without introducing race conditions with the hardirq.
528 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
531 wl1271_debug(DEBUG_IRQ, "IRQ work");
533 if (unlikely(wl->state != WLCORE_STATE_ON))
536 ret = wl1271_ps_elp_wakeup(wl);
540 while (!done && loopcount--) {
542 * In order to avoid a race with the hardirq, clear the flag
543 * before acknowledging the chip. Since the mutex is held,
544 * wl1271_ps_elp_wakeup cannot be called concurrently.
546 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
547 smp_mb__after_atomic();
549 ret = wlcore_fw_status(wl, wl->fw_status);
553 wlcore_hw_tx_immediate_compl(wl);
555 intr = wl->fw_status->intr;
556 intr &= WLCORE_ALL_INTR_MASK;
562 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 wl1271_error("HW watchdog interrupt received! starting recovery.");
564 wl->watchdog_recovery = true;
567 /* restarting the chip. ignore any other interrupt. */
571 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 wl1271_error("SW watchdog interrupt received! "
573 "starting recovery.");
574 wl->watchdog_recovery = true;
577 /* restarting the chip. ignore any other interrupt. */
581 if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
584 ret = wlcore_rx(wl, wl->fw_status);
588 /* Check if any tx blocks were freed */
589 spin_lock_irqsave(&wl->wl_lock, flags);
590 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591 wl1271_tx_total_queue_count(wl) > 0) {
592 spin_unlock_irqrestore(&wl->wl_lock, flags);
594 * In order to avoid starvation of the TX path,
595 * call the work function directly.
597 ret = wlcore_tx_work_locked(wl);
601 spin_unlock_irqrestore(&wl->wl_lock, flags);
604 /* check for tx results */
605 ret = wlcore_hw_tx_delayed_compl(wl);
609 /* Make sure the deferred queues don't get too long */
610 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611 skb_queue_len(&wl->deferred_rx_queue);
612 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613 wl1271_flush_deferred_work(wl);
616 if (intr & WL1271_ACX_INTR_EVENT_A) {
617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618 ret = wl1271_event_handle(wl, 0);
623 if (intr & WL1271_ACX_INTR_EVENT_B) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625 ret = wl1271_event_handle(wl, 1);
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 wl1271_debug(DEBUG_IRQ,
632 "WL1271_ACX_INTR_INIT_COMPLETE");
634 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
638 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: completes any pending ELP wakeup, defers work
 * while suspended (marks PENDING_WORK, disables the irq, signals a
 * wakeup event), otherwise runs wlcore_irq_locked() under wl->mutex,
 * queueing recovery on failure and re-queueing tx_work if TX was not
 * handled inline.  TX_PENDING is set around the locked section so
 * op_tx avoids redundantly queueing tx_work.
 * NOTE(review): the elp_compl NULL-check and the IRQ_HANDLED returns
 * are elided in this excerpt (numbering gaps).
 */
644 static irqreturn_t wlcore_irq(int irq, void *cookie)
648 struct wl1271 *wl = cookie;
650 /* complete the ELP completion */
651 spin_lock_irqsave(&wl->wl_lock, flags);
652 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
654 complete(wl->elp_compl);
655 wl->elp_compl = NULL;
658 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
659 /* don't enqueue a work right now. mark it as pending */
660 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
661 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
662 disable_irq_nosync(wl->irq);
663 pm_wakeup_event(wl->dev, 0);
664 spin_unlock_irqrestore(&wl->wl_lock, flags);
667 spin_unlock_irqrestore(&wl->wl_lock, flags);
669 /* TX might be handled here, avoid redundant work */
670 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
671 cancel_work_sync(&wl->tx_work);
673 mutex_lock(&wl->mutex);
675 ret = wlcore_irq_locked(wl);
677 wl12xx_queue_recovery_work(wl);
679 spin_lock_irqsave(&wl->wl_lock, flags);
680 /* In case TX was not handled here, queue TX work */
681 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
682 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
683 wl1271_tx_total_queue_count(wl) > 0)
684 ieee80211_queue_work(wl->hw, &wl->tx_work);
685 spin_unlock_irqrestore(&wl->wl_lock, flags);
687 mutex_unlock(&wl->mutex);
/*
 * Accumulator for the active-interface iteration below; also records
 * whether the interface of interest (cur_vif) is among the running ones.
 * NOTE(review): counter field(s) are elided in this excerpt (numbering gap).
 */
692 struct vif_counter_data {
695 struct ieee80211_vif *cur_vif;
696 bool cur_vif_running;
/*
 * Iterator callback for ieee80211_iterate_active_interfaces(): flags
 * when the vif being counted is the caller's current vif.
 * NOTE(review): the counter-increment line is elided in this excerpt.
 */
699 static void wl12xx_vif_count_iter(void *data, u8 *mac,
700 struct ieee80211_vif *vif)
702 struct vif_counter_data *counter = data;
705 if (counter->cur_vif == vif)
706 counter->cur_vif_running = true;
709 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active interfaces (and whether cur_vif is among them) by
 * iterating mac80211's active-interface list into *data.
 */
710 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
711 struct ieee80211_vif *cur_vif,
712 struct vif_counter_data *data)
714 memset(data, 0, sizeof(*data));
715 data->cur_vif = cur_vif;
717 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
718 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role, or
 * single-role based on the cached vif count), skipping the load if the
 * right type is already resident.  The image is copied into vmalloc'ed
 * wl->fw and wl->fw_type is updated only after a successful copy (it is
 * first reset to NONE so a half-finished load is never trusted).
 * NOTE(review): size-alignment check body, vfree of the previous image
 * and error/exit labels are elided in this excerpt (numbering gaps).
 */
721 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
723 const struct firmware *fw;
725 enum wl12xx_fw_type fw_type;
729 fw_type = WL12XX_FW_TYPE_PLT;
730 fw_name = wl->plt_fw_name;
733 * we can't call wl12xx_get_vif_count() here because
734 * wl->mutex is taken, so use the cached last_vif_count value
736 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
737 fw_type = WL12XX_FW_TYPE_MULTI;
738 fw_name = wl->mr_fw_name;
740 fw_type = WL12XX_FW_TYPE_NORMAL;
741 fw_name = wl->sr_fw_name;
745 if (wl->fw_type == fw_type)
748 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
750 ret = request_firmware(&fw, fw_name, wl->dev);
753 wl1271_error("could not get firmware %s: %d", fw_name, ret);
758 wl1271_error("firmware size is not multiple of 32 bits: %zu",
765 wl->fw_type = WL12XX_FW_TYPE_NONE;
766 wl->fw_len = fw->size;
767 wl->fw = vmalloc(wl->fw_len);
770 wl1271_error("could not allocate memory for the firmware");
775 memcpy(wl->fw, fw->data, wl->fw_len);
777 wl->fw_type = fw_type;
779 release_firmware(fw);
/*
 * Kick off FW recovery: mark the chip as RESTARTING (which also makes
 * re-entry a no-op, avoiding recursive recovery), wake it from ELP,
 * mask interrupts and queue recovery_work.  WARNs if recovery was not
 * intended (INTENDED_FW_RECOVERY flag clear).
 */
784 void wl12xx_queue_recovery_work(struct wl1271 *wl)
786 /* Avoid a recursive recovery */
787 if (wl->state == WLCORE_STATE_ON) {
788 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
791 wl->state = WLCORE_STATE_RESTARTING;
792 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
793 wl1271_ps_elp_wakeup(wl);
794 wlcore_disable_interrupts_nosync(wl);
795 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to wl->fwlog, clamped so the
 * buffer never grows past PAGE_SIZE.  Returns the number of bytes
 * actually copied (return statement elided in this excerpt).
 */
799 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
803 /* Make sure we have enough room */
804 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
806 /* Fill the FW log file, consumed by the sysfs fwlog entry */
807 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
808 wl->fwlog_size += len;
/*
 * After a FW panic, walk the FW log's linked list of memory blocks
 * (each block's first 4 LE bytes point at the next) and copy their
 * contents into the host fwlog buffer, then wake sysfs readers.
 * Temporarily switches the chip partition to reach each block and
 * restores the original partition at the end.  Skipped entirely when
 * the quirk says fwlog is unimplemented or no blocks are configured.
 * NOTE(review): kmalloc NULL-check body, ELP error paths and the
 * out/out_free labels are elided in this excerpt (numbering gaps).
 */
813 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
815 struct wlcore_partition_set part, old_part;
822 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
823 (wl->conf.fwlog.mem_blocks == 0))
826 wl1271_info("Reading FW panic log");
828 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
833 * Make sure the chip is awake and the logger isn't active.
834 * Do not send a stop fwlog command if the fw is hanged or if
835 * dbgpins are used (due to some fw bug).
837 if (wl1271_ps_elp_wakeup(wl))
839 if (!wl->watchdog_recovery &&
840 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
841 wl12xx_cmd_stop_fwlog(wl);
843 /* Read the first memory block address */
844 ret = wlcore_fw_status(wl, wl->fw_status);
848 addr = wl->fw_status->log_start_addr;
852 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
853 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
854 end_of_log = wl->fwlog_end;
856 offset = sizeof(addr);
860 old_part = wl->curr_part;
861 memset(&part, 0, sizeof(part));
863 /* Traverse the memory blocks linked list */
865 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
866 part.mem.size = PAGE_SIZE;
868 ret = wlcore_set_partition(wl, &part);
870 wl1271_error("%s: set_partition start=0x%X size=%d",
871 __func__, part.mem.start, part.mem.size);
875 memset(block, 0, wl->fw_mem_block_size);
876 ret = wlcore_read_hwaddr(wl, addr, block,
877 wl->fw_mem_block_size, false);
883 * Memory blocks are linked to one another. The first 4 bytes
884 * of each memory block hold the hardware address of the next
885 * one. The last memory block points to the first one in
886 * on demand mode and is equal to 0x2000000 in continuous mode.
888 addr = le32_to_cpup((__le32 *)block);
890 if (!wl12xx_copy_fwlog(wl, block + offset,
891 wl->fw_mem_block_size - offset))
893 } while (addr && (addr != end_of_log));
895 wake_up_interruptible(&wl->fwlog_waitq);
899 wlcore_set_partition(wl, &old_part);
/*
 * Snapshot a link's accumulated freed-packet count into its station's
 * private data so the TX sequence number survives interface teardown.
 * During recovery the count is padded (larger pad for GEM encryption)
 * to cover packets transmitted but not yet reflected in FW status.
 */
902 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
903 u8 hlid, struct ieee80211_sta *sta)
905 struct wl1271_station *wl_sta;
906 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
908 wl_sta = (void *)sta->drv_priv;
909 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
912 * increment the initial seq number on recovery to account for
913 * transmitted packets that we haven't yet got in the FW status
915 if (wlvif->encryption_type == KEY_GEM)
916 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
918 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
919 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper for wlcore_save_freed_pkts(): look up the
 * station by MAC under RCU and save its freed-packet count.  WARNs and
 * bails on an invalid link id or a zero address.
 * NOTE(review): rcu_read_lock/unlock lines appear elided in this
 * excerpt (numbering gaps) - ieee80211_find_sta requires RCU; confirm.
 */
922 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
923 struct wl12xx_vif *wlvif,
924 u8 hlid, const u8 *addr)
926 struct ieee80211_sta *sta;
927 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
929 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
930 is_zero_ether_addr(addr)))
934 sta = ieee80211_find_sta(vif, addr);
936 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, the FW program
 * counter and interrupt status registers (read under the BOOT
 * partition), and the running recovery count; then restore the WORK
 * partition.  Error-return lines after each read are elided here.
 */
940 static void wlcore_print_recovery(struct wl1271 *wl)
946 wl1271_info("Hardware recovery in progress. FW ver: %s",
947 wl->chip.fw_ver_str);
949 /* change partitions momentarily so we can read the FW pc */
950 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
954 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
958 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
962 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
963 pc, hint_sts, ++wl->recovery_count);
965 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery worker: dump panic log / diagnostics (unless the
 * recovery was intended), honor the bug_on_recovery and no_recovery
 * debug knobs, stop the TX queues, tear down every vif (saving STA
 * sequence-number state first), stop the chip, and ask mac80211 to
 * restart the HW.  Finally clears the recovery bookkeeping flags.
 * NOTE(review): goto out_unlock paths are elided here (numbering gaps).
 */
969 static void wl1271_recovery_work(struct work_struct *work)
972 container_of(work, struct wl1271, recovery_work);
973 struct wl12xx_vif *wlvif;
974 struct ieee80211_vif *vif;
976 mutex_lock(&wl->mutex);
978 if (wl->state == WLCORE_STATE_OFF || wl->plt)
981 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
982 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
983 wl12xx_read_fwlog_panic(wl);
984 wlcore_print_recovery(wl);
987 BUG_ON(wl->conf.recovery.bug_on_recovery &&
988 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
990 if (wl->conf.recovery.no_recovery) {
991 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
995 /* Prevent spurious TX during FW restart */
996 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
998 /* reboot the chipset */
999 while (!list_empty(&wl->wlvif_list)) {
1000 wlvif = list_first_entry(&wl->wlvif_list,
1001 struct wl12xx_vif, list);
1002 vif = wl12xx_wlvif_to_vif(wlvif);
1004 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1005 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1006 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1007 vif->bss_conf.bssid);
1010 __wl1271_op_remove_interface(wl, vif, false);
1013 wlcore_op_stop_locked(wl);
1015 ieee80211_restart_hw(wl->hw);
1018 * Its safe to enable TX now - the queues are stopped after a request
1019 * to restart the HW.
1021 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1024 wl->watchdog_recovery = false;
1025 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1026 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
1029 static int wlcore_fw_wakeup(struct wl1271 *wl)
1031 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device buffers needed before boot: raw and parsed
 * FW status structures and the TX result interface.  On a later
 * allocation failure the earlier buffers are freed (error path shown
 * at the bottom); -ENOMEM returns are elided here (numbering gaps).
 */
1034 static int wl1271_setup(struct wl1271 *wl)
1036 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1037 if (!wl->raw_fw_status)
1040 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1044 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1050 kfree(wl->fw_status);
1051 kfree(wl->raw_fw_status);
/*
 * Power the chip on: sleep before and after asserting power, reset the
 * IO, select the BOOT partition and wake the FW from ELP.  The final
 * wl1271_power_off() is the error path (label elided in this excerpt).
 */
1055 static int wl12xx_set_power_on(struct wl1271 *wl)
1059 msleep(WL1271_PRE_POWER_ON_SLEEP);
1060 ret = wl1271_power_on(wl);
1063 msleep(WL1271_POWER_ON_SLEEP);
1064 wl1271_io_reset(wl);
1067 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1071 /* ELP module wake up */
1072 ret = wlcore_fw_wakeup(wl);
1080 wl1271_power_off(wl);
/*
 * Bring the chip up for normal or PLT operation: power on, negotiate
 * the bus block size (dropping the TX blocksize-align quirk when the
 * bus can't honor it), allocate runtime buffers via wl1271_setup() and
 * fetch the right firmware image.
 * NOTE(review): error-return checks and the final return are elided
 * in this excerpt (numbering gaps).
 */
1084 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1088 ret = wl12xx_set_power_on(wl);
1093 * For wl127x based devices we could use the default block
1094 * size (512 bytes), but due to a bug in the sdio driver, we
1095 * need to set it explicitly after the chip is powered on. To
1096 * simplify the code and since the performance impact is
1097 * negligible, we use the same block size for all different
1100 * Check if the bus supports blocksize alignment and, if it
1101 * doesn't, make sure we don't have the quirk.
1103 if (!wl1271_set_block_size(wl))
1104 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1106 /* TODO: make sure the lower driver has set things up correctly */
1108 ret = wl1271_setup(wl);
1112 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line test) mode: only legal from the OFF
 * state.  Wakes the chip (with up to WL1271_BOOT_RETRIES attempts -
 * retry loop partially elided here), runs the chip-specific plt_init
 * unless the mode is CHIP_AWAKE-only, then publishes hw/fw version
 * info to the wiphy.  On exhausted retries, powers off and resets
 * plt_mode before reporting the failure.
 * NOTE(review): strncpy here does not guarantee NUL-termination if
 * fw_ver_str fills the destination - upstream later moved such copies
 * to strscpy; confirm fw_ver_str is always shorter than fw_version.
 */
1120 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1122 int retries = WL1271_BOOT_RETRIES;
1123 struct wiphy *wiphy = wl->hw->wiphy;
1125 static const char* const PLT_MODE[] = {
1134 mutex_lock(&wl->mutex);
1136 wl1271_notice("power up");
1138 if (wl->state != WLCORE_STATE_OFF) {
1139 wl1271_error("cannot go into PLT state because not "
1140 "in off state: %d", wl->state);
1145 /* Indicate to lower levels that we are now in PLT mode */
1147 wl->plt_mode = plt_mode;
1151 ret = wl12xx_chip_wakeup(wl, true);
1155 if (plt_mode != PLT_CHIP_AWAKE) {
1156 ret = wl->ops->plt_init(wl);
1161 wl->state = WLCORE_STATE_ON;
1162 wl1271_notice("firmware booted in PLT mode %s (%s)",
1164 wl->chip.fw_ver_str);
1166 /* update hw/fw version info in wiphy struct */
1167 wiphy->hw_version = wl->chip.id;
1168 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1169 sizeof(wiphy->fw_version));
1174 wl1271_power_off(wl);
1178 wl->plt_mode = PLT_OFF;
1180 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1181 WL1271_BOOT_RETRIES);
1183 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the IRQ
 * handler can't run against a half-torn-down device), flush deferred
 * work and cancel the outstanding workers outside the mutex, then
 * power off and reset state/sleep_auth/plt bookkeeping.
 * NOTE(review): the "not in PLT state" guard's surrounding if and the
 * error return are elided in this excerpt (numbering gaps).
 */
1188 int wl1271_plt_stop(struct wl1271 *wl)
1192 wl1271_notice("power down");
1195 * Interrupts must be disabled before setting the state to OFF.
1196 * Otherwise, the interrupt handler might be called and exit without
1197 * reading the interrupt status.
1199 wlcore_disable_interrupts(wl);
1200 mutex_lock(&wl->mutex);
1202 mutex_unlock(&wl->mutex);
1205 * This will not necessarily enable interrupts as interrupts
1206 * may have been disabled when op_stop was called. It will,
1207 * however, balance the above call to disable_interrupts().
1209 wlcore_enable_interrupts(wl);
1211 wl1271_error("cannot power down because not in PLT "
1212 "state: %d", wl->state);
1217 mutex_unlock(&wl->mutex);
1219 wl1271_flush_deferred_work(wl);
1220 cancel_work_sync(&wl->netstack_work);
1221 cancel_work_sync(&wl->recovery_work);
1222 cancel_delayed_work_sync(&wl->elp_work);
1223 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1225 mutex_lock(&wl->mutex);
1226 wl1271_power_off(wl);
1228 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1229 wl->state = WLCORE_STATE_OFF;
1231 wl->plt_mode = PLT_OFF;
1233 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback: map the skb to a link (hlid) and AC queue,
 * drop it (freeing the skb) if it has no vif, the link is invalid, or
 * the queue is hard-stopped; otherwise enqueue it on the per-link
 * queue, bump the counters, soft-stop the queue at the high watermark,
 * and schedule tx_work unless TX is already pending or the FW is busy.
 * Runs under wl->wl_lock (spinlock), never wl->mutex.
 * NOTE(review): goto-out jumps after the drop branches are elided in
 * this excerpt (numbering gaps).
 */
1239 static void wl1271_op_tx(struct ieee80211_hw *hw,
1240 struct ieee80211_tx_control *control,
1241 struct sk_buff *skb)
1243 struct wl1271 *wl = hw->priv;
1244 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1245 struct ieee80211_vif *vif = info->control.vif;
1246 struct wl12xx_vif *wlvif = NULL;
1247 unsigned long flags;
1252 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1253 ieee80211_free_txskb(hw, skb);
1257 wlvif = wl12xx_vif_to_data(vif);
1258 mapping = skb_get_queue_mapping(skb);
1259 q = wl1271_tx_get_queue(mapping);
1261 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1263 spin_lock_irqsave(&wl->wl_lock, flags);
1266 * drop the packet if the link is invalid or the queue is stopped
1267 * for any reason but watermark. Watermark is a "soft"-stop so we
1268 * allow these packets through.
1270 if (hlid == WL12XX_INVALID_LINK_ID ||
1271 (!test_bit(hlid, wlvif->links_map)) ||
1272 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1273 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1274 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1275 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1276 ieee80211_free_txskb(hw, skb);
1280 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1282 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1284 wl->tx_queue_count[q]++;
1285 wlvif->tx_queue_count[q]++;
1288 * The workqueue is slow to process the tx_queue and we need stop
1289 * the queue here, otherwise the queue will get too long.
1291 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1292 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1293 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1294 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1295 wlcore_stop_queue_locked(wl, wlvif, q,
1296 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1300 * The chip specific setup must run before the first TX packet -
1301 * before that, the tx_work will not be initialized!
1304 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1305 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1306 ieee80211_queue_work(wl->hw, &wl->tx_work);
1309 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low
 * on RX memory blocks.  A second request while one is pending is a
 * no-op.  If the FW TX path is idle, push it out immediately via
 * wlcore_tx_work_locked(); otherwise the threaded IRQ handler will
 * schedule the TX work (final return elided in this excerpt).
 */
1312 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1314 unsigned long flags;
1317 /* no need to queue a new dummy packet if one is already pending */
1318 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1321 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1323 spin_lock_irqsave(&wl->wl_lock, flags);
1324 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1325 wl->tx_queue_count[q]++;
1326 spin_unlock_irqrestore(&wl->wl_lock, flags);
1328 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1329 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1330 return wlcore_tx_work_locked(wl);
1333 * If the FW TX is busy, TX work will be scheduled by the threaded
1334 * interrupt handler function
1340 * The size of the dummy packet should be at least 1400 bytes. However, in
1341 * order to minimize the number of bus transactions, aligning it to 512 bytes
1342 * boundaries could be beneficial, performance wise
1344 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed NULLFUNC data frame padded
 * to TOTAL_TX_DUMMY_PACKET_SIZE (minus the TX HW descriptor reserved
 * as headroom), tagged with the management TID and queue 0.  Returns
 * NULL on allocation failure (return statements elided here).
 */
1346 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1348 struct sk_buff *skb;
1349 struct ieee80211_hdr_3addr *hdr;
1350 unsigned int dummy_packet_size;
1352 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1353 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1355 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1357 wl1271_warning("Failed to allocate a dummy packet skb");
1361 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1363 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1364 memset(hdr, 0, sizeof(*hdr));
1365 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1366 IEEE80211_STYPE_NULLFUNC |
1367 IEEE80211_FCTL_TODS);
1369 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1371 /* Dummy packets require the TID to be management */
1372 skb->priority = WL1271_TID_MGMT;
1374 /* Initialize all fields that might be used */
1375 skb_set_queue_mapping(skb, 0);
1376 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a single cfg80211 WoWLAN packet pattern against FW limits
 * (maximum number of filter fields and total fields buffer size) before
 * it is converted into an RX filter.
 */
1384 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1386 int num_fields = 0, in_field = 0, fields_size = 0;
1387 int i, pattern_len = 0;
1390 wl1271_warning("No mask in WoWLAN pattern");
1395 * The pattern is broken up into segments of bytes at different offsets
1396 * that need to be checked by the FW filter. Each segment is called
1397 * a field in the FW API. We verify that the total number of fields
1398 * required for this pattern won't exceed FW limits (8)
1399 * as well as the total fields buffer won't exceed the FW limit.
1400 * Note that if there's a pattern which crosses Ethernet/IP header
1401 * boundary a new field is required.
1403 for (i = 0; i < p->pattern_len; i++) {
1404 if (test_bit(i, (unsigned long *)p->mask)) {
/* crossing the Ethernet header boundary forces a field split */
1409 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1411 fields_size += pattern_len +
1412 RX_FILTER_FIELD_OVERHEAD;
1420 fields_size += pattern_len +
1421 RX_FILTER_FIELD_OVERHEAD;
1428 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1432 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1433 wl1271_warning("RX Filter too complex. Too many segments");
1437 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1438 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees it with wl1271_rx_filter_free() */
1445 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1447 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including each field's separately allocated pattern */
1450 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1457 for (i = 0; i < filter->num_fields; i++)
1458 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern segment) to an RX filter.
 * The pattern bytes are copied into a freshly allocated buffer owned by
 * the filter. Fails when the FW per-filter field limit is reached or on
 * allocation failure.
 */
1463 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1464 u16 offset, u8 flags,
1465 const u8 *pattern, u8 len)
1467 struct wl12xx_rx_filter_field *field;
1469 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1470 wl1271_warning("Max fields per RX filter. can't alloc another");
1474 field = &filter->fields[filter->num_fields];
1476 field->pattern = kzalloc(len, GFP_KERNEL);
1477 if (!field->pattern) {
1478 wl1271_warning("Failed to allocate RX filter pattern");
/* commit the field only after the pattern buffer is secured */
1482 filter->num_fields++;
1484 field->offset = cpu_to_le16(offset);
1485 field->flags = flags;
1487 memcpy(field->pattern, pattern, len);
/*
 * Compute the flattened (wire-format) size of all fields in the filter:
 * per field, the struct minus the pattern pointer plus the pattern bytes.
 */
1492 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1494 int i, fields_size = 0;
1496 for (i = 0; i < filter->num_fields; i++)
1497 fields_size += filter->fields[i].len +
1498 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into the contiguous buffer 'buf' in the
 * layout the FW expects: each field header is followed inline by its
 * pattern bytes (replacing the in-memory pattern pointer).
 * Caller must size 'buf' via wl1271_rx_filter_get_fields_size().
 */
1504 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1508 struct wl12xx_rx_filter_field *field;
1510 for (i = 0; i < filter->num_fields; i++) {
1511 field = (struct wl12xx_rx_filter_field *)buf;
1513 field->offset = filter->fields[i].offset;
1514 field->flags = filter->fields[i].flags;
1515 field->len = filter->fields[i].len;
1517 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
/* advance past the header (sans pattern pointer) and inline pattern */
1518 buf += sizeof(struct wl12xx_rx_filter_field) -
1519 sizeof(u8 *) + field->len;
1524 * Allocates an RX filter returned through f
1525 * which needs to be freed using rx_filter_free()
1528 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1529 struct wl12xx_rx_filter **f)
1532 struct wl12xx_rx_filter *filter;
1536 filter = wl1271_rx_filter_alloc();
1538 wl1271_warning("Failed to alloc rx filter");
/* walk the mask: each contiguous run of set bits becomes one field */
1544 while (i < p->pattern_len) {
1545 if (!test_bit(i, (unsigned long *)p->mask)) {
1550 for (j = i; j < p->pattern_len; j++) {
1551 if (!test_bit(j, (unsigned long *)p->mask))
/* a run crossing the Ethernet header boundary must be split */
1554 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1555 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* offsets are relative to either the Ethernet or the IP header */
1559 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1561 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1563 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1564 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1569 ret = wl1271_rx_filter_alloc_field(filter,
1572 &p->pattern[i], len);
/* a matching packet should wake the host, not be dropped */
1579 filter->action = FILTER_SIGNAL;
1585 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the given WoWLAN configuration.
 * With no configuration (or wow->any), clear all filters and restore the
 * default signal-everything policy; otherwise install one filter per
 * pattern and default-drop everything else while suspended.
 */
1591 static int wl1271_configure_wowlan(struct wl1271 *wl,
1592 struct cfg80211_wowlan *wow)
1596 if (!wow || wow->any || !wow->n_patterns) {
1597 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1602 ret = wl1271_rx_filter_clear_all(wl);
1609 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1612 /* Validate all incoming patterns before clearing current FW state */
1613 for (i = 0; i < wow->n_patterns; i++) {
1614 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1616 wl1271_warning("Bad wowlan pattern %d", i);
1621 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1625 ret = wl1271_rx_filter_clear_all(wl);
1629 /* Translate WoWLAN patterns into filters */
1630 for (i = 0; i < wow->n_patterns; i++) {
1631 struct cfg80211_pkt_pattern *p;
1632 struct wl12xx_rx_filter *filter = NULL;
1634 p = &wow->patterns[i];
1636 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1638 wl1271_warning("Failed to create an RX filter from "
1639 "wowlan pattern %d", i);
1643 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the local filter is always freed */
1645 wl1271_rx_filter_free(filter);
/* non-matching traffic is dropped while WoWLAN filters are active */
1650 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare a STA vif for system suspend: install WoWLAN filters and,
 * when they differ from the normal settings, apply the suspend-specific
 * wake-up conditions and listen interval.
 */
1656 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1657 struct wl12xx_vif *wlvif,
1658 struct cfg80211_wowlan *wow)
1662 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1665 ret = wl1271_ps_elp_wakeup(wl);
1669 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend settings equal the runtime settings */
1673 if ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval))
1679 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 wl->conf.conn.suspend_wake_up_event,
1681 wl->conf.conn.suspend_listen_interval);
1684 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1687 wl1271_ps_elp_sleep(wl);
/*
 * Prepare an AP vif for system suspend: enable beacon filtering so the
 * host is not woken for every received beacon.
 */
1693 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1694 struct wl12xx_vif *wlvif)
1698 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1701 ret = wl1271_ps_elp_wakeup(wl);
1705 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1707 wl1271_ps_elp_sleep(wl);
/* Dispatch per-vif suspend configuration by BSS type; other types need none */
1713 static int wl1271_configure_suspend(struct wl1271 *wl,
1714 struct wl12xx_vif *wlvif,
1715 struct cfg80211_wowlan *wow)
1717 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1718 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1719 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1720 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo the suspend-time configuration on resume: clear WoWLAN filters and
 * restore the runtime wake-up conditions (STA) or disable beacon
 * filtering (AP). Best-effort: errors are logged, not propagated.
 */
1724 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1727 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1728 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1730 if ((!is_ap) && (!is_sta))
1733 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1736 ret = wl1271_ps_elp_wakeup(wl);
/* NULL wow config clears the filters installed on suspend */
1741 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore when suspend settings equal runtime settings */
1743 if ((wl->conf.conn.suspend_wake_up_event ==
1744 wl->conf.conn.wake_up_event) &&
1745 (wl->conf.conn.suspend_listen_interval ==
1746 wl->conf.conn.listen_interval))
1749 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1750 wl->conf.conn.wake_up_event,
1751 wl->conf.conn.listen_interval);
1754 wl1271_error("resume: wake up conditions failed: %d",
1758 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1762 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback: flush TX, configure every vif for suspend,
 * then quiesce the driver (flush IRQ/TX/ELP work, stop the TX watchdog)
 * so no work runs while the system is asleep.
 */
1765 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1766 struct cfg80211_wowlan *wow)
1768 struct wl1271 *wl = hw->priv;
1769 struct wl12xx_vif *wlvif;
1772 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1775 /* we want to perform the recovery before suspending */
1776 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1777 wl1271_warning("postponing suspend to perform recovery");
1781 wl1271_tx_flush(wl);
1783 mutex_lock(&wl->mutex);
1784 wl->wow_enabled = true;
1785 wl12xx_for_each_wlvif(wl, wlvif) {
1786 ret = wl1271_configure_suspend(wl, wlvif, wow);
1788 mutex_unlock(&wl->mutex);
1789 wl1271_warning("couldn't prepare device to suspend");
1793 mutex_unlock(&wl->mutex);
1794 /* flush any remaining work */
1795 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1798 * disable and re-enable interrupts in order to flush
1801 wlcore_disable_interrupts(wl);
1804 * set suspended flag to avoid triggering a new threaded_irq
1805 * work. no need for spinlock as interrupts are disabled.
1807 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1809 wlcore_enable_interrupts(wl);
1810 flush_work(&wl->tx_work);
1811 flush_delayed_work(&wl->elp_work);
1814 * Cancel the watchdog even if above tx_flush failed. We will detect
1815 * it on resume anyway.
1817 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback: re-enable IRQ work queuing, run any IRQ work
 * that arrived while suspended (unless a recovery is pending, in which
 * case the recovery is queued instead), then restore each vif's runtime
 * configuration and re-arm the TX watchdog on the next TX.
 */
1822 static int wl1271_op_resume(struct ieee80211_hw *hw)
1824 struct wl1271 *wl = hw->priv;
1825 struct wl12xx_vif *wlvif;
1826 unsigned long flags;
1827 bool run_irq_work = false, pending_recovery;
1830 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1832 WARN_ON(!wl->wow_enabled);
1835 * re-enable irq_work enqueuing, and call irq_work directly if
1836 * there is a pending work.
1838 spin_lock_irqsave(&wl->wl_lock, flags);
1839 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1840 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1841 run_irq_work = true;
1842 spin_unlock_irqrestore(&wl->wl_lock, flags);
1844 mutex_lock(&wl->mutex);
1846 /* test the recovery flag before calling any SDIO functions */
1847 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1851 wl1271_debug(DEBUG_MAC80211,
1852 "run postponed irq_work directly");
1854 /* don't talk to the HW if recovery is pending */
1855 if (!pending_recovery) {
1856 ret = wlcore_irq_locked(wl);
1858 wl12xx_queue_recovery_work(wl);
1861 wlcore_enable_interrupts(wl);
1864 if (pending_recovery) {
1865 wl1271_warning("queuing forgotten recovery on resume");
1866 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1870 wl12xx_for_each_wlvif(wl, wlvif) {
1871 wl1271_configure_resume(wl, wlvif);
1875 wl->wow_enabled = false;
1878 * Set a flag to re-init the watchdog on the first Tx after resume.
1879 * That way we avoid possible conditions where Tx-complete interrupts
1880 * fail to arrive and we perform a spurious recovery.
1882 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1883 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback: intentionally a no-op; the real bring-up is
 * deferred to add_interface, as explained below.
 */
1889 static int wl1271_op_start(struct ieee80211_hw *hw)
1891 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1894 * We have to delay the booting of the hardware because
1895 * we need to know the local MAC address before downloading and
1896 * initializing the firmware. The MAC address cannot be changed
1897 * after boot, and without the proper MAC address, the firmware
1898 * will not function properly.
1900 * The MAC address is first known when the corresponding interface
1901 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held: mark the state OFF, drain all
 * pending work (briefly dropping the mutex to avoid deadlocking with the
 * work items), power the chip down, and reset all driver bookkeeping so
 * a subsequent add_interface starts from a clean slate.
 */
1907 static void wlcore_op_stop_locked(struct wl1271 *wl)
1911 if (wl->state == WLCORE_STATE_OFF) {
1912 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1914 wlcore_enable_interrupts(wl);
1920 * this must be before the cancel_work calls below, so that the work
1921 * functions don't perform further work.
1923 wl->state = WLCORE_STATE_OFF;
1926 * Use the nosync variant to disable interrupts, so the mutex could be
1927 * held while doing so without deadlocking.
1929 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while synchronously cancelling work items that take it */
1931 mutex_unlock(&wl->mutex);
1933 wlcore_synchronize_interrupts(wl);
1934 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1935 cancel_work_sync(&wl->recovery_work);
1936 wl1271_flush_deferred_work(wl);
1937 cancel_delayed_work_sync(&wl->scan_complete_work);
1938 cancel_work_sync(&wl->netstack_work);
1939 cancel_work_sync(&wl->tx_work);
1940 cancel_delayed_work_sync(&wl->elp_work);
1941 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1943 /* let's notify MAC80211 about the remaining pending TX frames */
1944 mutex_lock(&wl->mutex);
1945 wl12xx_tx_reset(wl);
1947 wl1271_power_off(wl);
1949 * In case a recovery was scheduled, interrupts were disabled to avoid
1950 * an interrupt storm. Now that the power is down, it is safe to
1951 * re-enable interrupts to balance the disable depth
1953 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1954 wlcore_enable_interrupts(wl);
/* reset global driver state to its post-probe defaults */
1956 wl->band = IEEE80211_BAND_2GHZ;
1959 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1960 wl->channel_type = NL80211_CHAN_NO_HT;
1961 wl->tx_blocks_available = 0;
1962 wl->tx_allocated_blocks = 0;
1963 wl->tx_results_count = 0;
1964 wl->tx_packets_count = 0;
1965 wl->time_offset = 0;
1966 wl->ap_fw_ps_map = 0;
1968 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1969 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1970 memset(wl->links_map, 0, sizeof(wl->links_map));
1971 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1972 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1973 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1974 wl->active_sta_count = 0;
1975 wl->active_link_count = 0;
1977 /* The system link is always allocated */
1978 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1979 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1980 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1983 * this is performed after the cancel_work calls and the associated
1984 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1985 * get executed before all these vars have been reset.
1989 wl->tx_blocks_freed = 0;
1991 for (i = 0; i < NUM_TX_QUEUES; i++) {
1992 wl->tx_pkts_freed[i] = 0;
1993 wl->tx_allocated_pkts[i] = 0;
1996 wl1271_debugfs_reset(wl);
/* release FW status / TX-result buffers; reallocated on next boot */
1998 kfree(wl->raw_fw_status);
1999 wl->raw_fw_status = NULL;
2000 kfree(wl->fw_status);
2001 wl->fw_status = NULL;
2002 kfree(wl->tx_res_if);
2003 wl->tx_res_if = NULL;
2004 kfree(wl->target_mem_map);
2005 wl->target_mem_map = NULL;
2008 * FW channels must be re-calibrated after recovery,
2009 * save current Reg-Domain channel configuration and clear it.
2011 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2012 sizeof(wl->reg_ch_conf_pending));
2013 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the mutex and delegate to the locked variant */
2016 static void wlcore_op_stop(struct ieee80211_hw *hw)
2018 struct wl1271 *wl = hw->priv;
2020 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2022 mutex_lock(&wl->mutex);
2024 wlcore_op_stop_locked(wl);
2026 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report failure to mac80211 and tell the FW to abort the switch.
 */
2029 static void wlcore_channel_switch_work(struct work_struct *work)
2031 struct delayed_work *dwork;
2033 struct ieee80211_vif *vif;
2034 struct wl12xx_vif *wlvif;
2037 dwork = container_of(work, struct delayed_work, work);
2038 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2041 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2043 mutex_lock(&wl->mutex);
2045 if (unlikely(wl->state != WLCORE_STATE_ON))
2048 /* check the channel switch is still ongoing */
2049 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2052 vif = wl12xx_wlvif_to_vif(wlvif);
/* 'false' = switch did not succeed */
2053 ieee80211_chswitch_done(vif, false);
2055 ret = wl1271_ps_elp_wakeup(wl);
2059 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2061 wl1271_ps_elp_sleep(wl);
2063 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a beacon-loss-induced connection loss to
 * mac80211 once the grace period has expired and the vif is still
 * associated.
 */
2066 static void wlcore_connection_loss_work(struct work_struct *work)
2068 struct delayed_work *dwork;
2070 struct ieee80211_vif *vif;
2071 struct wl12xx_vif *wlvif;
2073 dwork = container_of(work, struct delayed_work, work);
2074 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2077 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2079 mutex_lock(&wl->mutex);
2081 if (unlikely(wl->state != WLCORE_STATE_ON))
2084 /* Call mac80211 connection loss */
2085 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2088 vif = wl12xx_wlvif_to_vif(wlvif);
2089 ieee80211_connection_loss(vif);
2091 mutex_unlock(&wl->mutex);
/*
 * Delayed work that tears down the ROC kept alive for a station whose
 * authentication never completed (no assoc followed the auth reply
 * within the timeout).
 */
2094 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2096 struct delayed_work *dwork;
2098 struct wl12xx_vif *wlvif;
2099 unsigned long time_spare;
2102 dwork = container_of(work, struct delayed_work, work);
2103 wlvif = container_of(dwork, struct wl12xx_vif,
2104 pending_auth_complete_work);
2107 mutex_lock(&wl->mutex);
2109 if (unlikely(wl->state != WLCORE_STATE_ON))
2113 * Make sure a second really passed since the last auth reply. Maybe
2114 * a second auth reply arrived while we were stuck on the mutex.
2115 * Check for a little less than the timeout to protect from scheduler
2118 time_spare = jiffies +
2119 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2120 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2123 ret = wl1271_ps_elp_wakeup(wl);
2127 /* cancel the ROC if active */
2128 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2130 wl1271_ps_elp_sleep(wl);
2132 mutex_unlock(&wl->mutex);
/*
 * Claim the first free slot in the rate-policies bitmap and return its
 * index through *idx; fails when all policies are in use.
 */
2135 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2137 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2138 WL12XX_MAX_RATE_POLICIES);
2139 if (policy >= WL12XX_MAX_RATE_POLICIES)
2142 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and poison *idx with the out-of-range
 * sentinel so double-frees trip the WARN_ON.
 */
2147 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2149 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2152 __clear_bit(*idx, wl->rate_policies_map);
2153 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free slot in the keep-alive (KLV) templates bitmap and
 * return its index through *idx; fails when all templates are in use.
 */
2156 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2158 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2159 WLCORE_MAX_KLV_TEMPLATES);
2160 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2163 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive template slot and poison *idx with the
 * out-of-range sentinel so double-frees trip the WARN_ON.
 */
2168 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2170 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2173 __clear_bit(*idx, wl->klv_templates_map);
2174 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type (and P2P flavor) to a FW role type, or
 * WL12XX_INVALID_ROLE_TYPE for an unrecognized BSS type.
 */
2177 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2179 switch (wlvif->bss_type) {
2180 case BSS_TYPE_AP_BSS:
2182 return WL1271_ROLE_P2P_GO;
2184 return WL1271_ROLE_AP;
2186 case BSS_TYPE_STA_BSS:
2188 return WL1271_ROLE_P2P_CL;
2190 return WL1271_ROLE_STA;
2193 return WL1271_ROLE_IBSS;
2196 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2198 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state for a newly added interface: derive
 * the BSS type from the mac80211 vif type, allocate rate policies and
 * (for STA/IBSS) a keep-alive template, copy global settings into the
 * vif, and set up its work items and timers.
 */
2201 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2203 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2206 /* clear everything but the persistent data */
2207 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2209 switch (ieee80211_vif_type_p2p(vif)) {
2210 case NL80211_IFTYPE_P2P_CLIENT:
2213 case NL80211_IFTYPE_STATION:
2214 wlvif->bss_type = BSS_TYPE_STA_BSS;
2216 case NL80211_IFTYPE_ADHOC:
2217 wlvif->bss_type = BSS_TYPE_IBSS;
2219 case NL80211_IFTYPE_P2P_GO:
2222 case NL80211_IFTYPE_AP:
2223 wlvif->bss_type = BSS_TYPE_AP_BSS;
2226 wlvif->bss_type = MAX_BSS_TYPE;
/* mark role/link ids unassigned until the FW allocates them */
2230 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2231 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2232 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2234 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2235 wlvif->bss_type == BSS_TYPE_IBSS) {
2236 /* init sta/ibss data */
2237 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2238 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2239 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2240 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2241 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2242 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2243 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2244 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2247 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2248 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2249 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2250 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2251 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2252 wl12xx_allocate_rate_policy(wl,
2253 &wlvif->ap.ucast_rate_idx[i]);
2254 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2256 * TODO: check if basic_rate shouldn't be
2257 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2258 * instead (the same thing for STA above).
2260 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2261 /* TODO: this seems to be used only for STA, check it */
2262 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2265 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2266 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2267 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2270 * mac80211 configures some values globally, while we treat them
2271 * per-interface. thus, on init, we have to copy them from wl
2273 wlvif->band = wl->band;
2274 wlvif->channel = wl->channel;
2275 wlvif->power_level = wl->power_level;
2276 wlvif->channel_type = wl->channel_type;
2278 INIT_WORK(&wlvif->rx_streaming_enable_work,
2279 wl1271_rx_streaming_enable_work);
2280 INIT_WORK(&wlvif->rx_streaming_disable_work,
2281 wl1271_rx_streaming_disable_work);
2282 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2283 wlcore_channel_switch_work);
2284 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2285 wlcore_connection_loss_work);
2286 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2287 wlcore_pending_auth_complete_work);
2288 INIT_LIST_HEAD(&wlvif->list);
2290 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2291 (unsigned long) wlvif);
/*
 * Boot the chip and firmware (retried up to WL1271_BOOT_RETRIES times),
 * run HW init, publish hw/fw version info to wiphy, and move the driver
 * to WLCORE_STATE_ON. On a failed attempt the chip is powered off and
 * pending IRQ work is drained before retrying.
 */
2295 static int wl12xx_init_fw(struct wl1271 *wl)
2297 int retries = WL1271_BOOT_RETRIES;
2298 bool booted = false;
2299 struct wiphy *wiphy = wl->hw->wiphy;
2304 ret = wl12xx_chip_wakeup(wl, false);
2308 ret = wl->ops->boot(wl);
2312 ret = wl1271_hw_init(wl);
2320 mutex_unlock(&wl->mutex);
2321 /* Unlocking the mutex in the middle of handling is
2322 inherently unsafe. In this case we deem it safe to do,
2323 because we need to let any possibly pending IRQ out of
2324 the system (and while we are WLCORE_STATE_OFF the IRQ
2325 work function will not do anything.) Also, any other
2326 possible concurrent operations will fail due to the
2327 current state, hence the wl1271 struct should be safe. */
2328 wlcore_disable_interrupts(wl);
2329 wl1271_flush_deferred_work(wl);
2330 cancel_work_sync(&wl->netstack_work);
2331 mutex_lock(&wl->mutex);
2333 wl1271_power_off(wl);
2337 wl1271_error("firmware boot failed despite %d retries",
2338 WL1271_BOOT_RETRIES);
2342 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2344 /* update hw/fw version info in wiphy struct */
2345 wiphy->hw_version = wl->chip.id;
/*
 * NOTE(review): strncpy does not NUL-terminate when fw_ver_str fills
 * fw_version completely — confirm lengths, or prefer strlcpy/strscpy.
 */
2346 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2347 sizeof(wiphy->fw_version));
2350 * Now we know if 11a is supported (info from the NVS), so disable
2351 * 11a channels if not supported
2353 if (!wl->enable_11a)
2354 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2356 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2357 wl->enable_11a ? "" : "not ");
2359 wl->state = WLCORE_STATE_ON;
/* Whether the vif's device role has a link allocated (i.e. was started) */
2364 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2366 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2370 * Check whether a fw switch (i.e. moving from one loaded
2371 * fw to another) is needed. This function is also responsible
2372 * for updating wl->last_vif_count, so it must be called before
2373 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2376 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2377 struct vif_counter_data vif_counter_data,
2380 enum wl12xx_fw_type current_fw = wl->fw_type;
2381 u8 vif_count = vif_counter_data.counter;
/* during an interface-type change no fw switch should be triggered */
2383 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2386 /* increase the vif count if this is a new vif */
2387 if (add && !vif_counter_data.cur_vif_running)
2390 wl->last_vif_count = vif_count;
2392 /* no need for fw change if the device is OFF */
2393 if (wl->state == WLCORE_STATE_OFF)
2396 /* no need for fw change if a single fw is used */
2397 if (!wl->mr_fw_name)
/* switch to multi-role fw above one vif, back to single-role at <= 1 */
2400 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2402 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2409 * Enter "forced psm". Make sure the sta is in psm against the ap,
2410 * to make the fw switch a bit more disconnection-persistent.
2412 static void wl12xx_force_active_psm(struct wl1271 *wl)
2414 struct wl12xx_vif *wlvif;
/* applies to every STA vif currently tracked by the driver */
2416 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2417 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for wlcore_hw_queue_iter: which hw-queue bases are taken */
2421 struct wlcore_hw_queue_iter_data {
2422 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2424 struct ieee80211_vif *vif;
2425 /* is the current vif among those iterated */
/*
 * Active-interface iterator callback: mark the hw-queue base used by each
 * running vif, and note whether the vif being added is already running.
 */
2429 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2430 struct ieee80211_vif *vif)
2432 struct wlcore_hw_queue_iter_data *iter_data = data;
2434 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2437 if (iter_data->cur_running || vif == iter_data->vif) {
2438 iter_data->cur_running = true;
/* each vif owns NUM_TX_QUEUES consecutive queues; record its base slot */
2442 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign the vif a block of NUM_TX_QUEUES mac80211 hw queues: reuse the
 * pre-allocated base when the vif is already running (resume/recovery),
 * otherwise pick the first free base, then set the per-AC queue numbers
 * and (for AP) the CAB queue.
 */
2445 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2446 struct wl12xx_vif *wlvif)
2448 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2449 struct wlcore_hw_queue_iter_data iter_data = {};
2452 iter_data.vif = vif;
2454 /* mark all bits taken by active interfaces */
2455 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2456 IEEE80211_IFACE_ITER_RESUME_ALL,
2457 wlcore_hw_queue_iter, &iter_data);
2459 /* the current vif is already running in mac80211 (resume/recovery) */
2460 if (iter_data.cur_running) {
2461 wlvif->hw_queue_base = vif->hw_queue[0];
2462 wl1271_debug(DEBUG_MAC80211,
2463 "using pre-allocated hw queue base %d",
2464 wlvif->hw_queue_base);
2466 /* interface type might have changed type */
2467 goto adjust_cab_queue;
2470 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2471 WLCORE_NUM_MAC_ADDRESSES);
2472 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2475 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2476 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2477 wlvif->hw_queue_base);
2479 for (i = 0; i < NUM_TX_QUEUES; i++) {
/* fresh base: clear any stale stop reasons before handing to mac80211 */
2480 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2481 /* register hw queues in mac80211 */
2482 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2486 /* the last places are reserved for cab queues per interface */
2487 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2488 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2489 wlvif->hw_queue_base / NUM_TX_QUEUES;
2491 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: initialize per-vif data, allocate hw
 * queues and a FW role, switch single/multi-role firmware if needed, and
 * boot the firmware on the first interface (deferred from op_start so the
 * MAC address is known).
 */
2496 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2497 struct ieee80211_vif *vif)
2499 struct wl1271 *wl = hw->priv;
2500 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2501 struct vif_counter_data vif_count;
2506 wl1271_error("Adding Interface not allowed while in PLT mode");
2510 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2511 IEEE80211_VIF_SUPPORTS_UAPSD |
2512 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2514 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2515 ieee80211_vif_type_p2p(vif), vif->addr);
2517 wl12xx_get_vif_count(hw, vif, &vif_count);
2519 mutex_lock(&wl->mutex);
2520 ret = wl1271_ps_elp_wakeup(wl);
2525 * in some very corner case HW recovery scenarios its possible to
2526 * get here before __wl1271_op_remove_interface is complete, so
2527 * opt out if that is the case.
2529 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2530 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2536 ret = wl12xx_init_vif_data(wl, vif);
2541 role_type = wl12xx_get_role_type(wl, wlvif);
2542 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2547 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* fw switch is done via an intended recovery (reboots the chip) */
2551 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2552 wl12xx_force_active_psm(wl);
2553 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2554 mutex_unlock(&wl->mutex);
2555 wl1271_recovery_work(&wl->recovery_work);
2560 * TODO: after the nvs issue will be solved, move this block
2561 * to start(), and make sure here the driver is ON.
2563 if (wl->state == WLCORE_STATE_OFF) {
2565 * we still need this in order to configure the fw
2566 * while uploading the nvs
2568 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2570 ret = wl12xx_init_fw(wl);
2575 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2576 role_type, &wlvif->role_id);
2580 ret = wl1271_init_vif_specific(wl, vif);
2584 list_add(&wlvif->list, &wl->wlvif_list);
2585 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2587 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2592 wl1271_ps_elp_sleep(wl);
2594 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held: abort any scan it owns, disable
 * its FW roles, reset its TX state, release rate policies / keep-alive
 * templates / link ids, and (dropping the mutex briefly) cancel its work
 * items and timers. Also re-tunes sleep auth when the last AP goes away.
 */
2599 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2600 struct ieee80211_vif *vif,
2601 bool reset_tx_queues)
2603 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2605 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2607 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2609 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2612 /* because of hardware recovery, we may get here twice */
2613 if (wl->state == WLCORE_STATE_OFF)
2616 wl1271_info("down");
2618 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2619 wl->scan_wlvif == wlvif) {
2621 * Rearm the tx watchdog just before idling scan. This
2622 * prevents just-finished scans from triggering the watchdog
2624 wl12xx_rearm_tx_watchdog_locked(wl);
2626 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2627 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2628 wl->scan_wlvif = NULL;
2629 wl->scan.req = NULL;
/* 'true' = scan was aborted */
2630 ieee80211_scan_completed(wl->hw, true);
2633 if (wl->sched_vif == wlvif)
2634 wl->sched_vif = NULL;
2636 if (wl->roc_vif == vif) {
2638 ieee80211_remain_on_channel_expired(wl->hw);
/* only talk to the FW when no recovery is in flight */
2641 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2642 /* disable active roles */
2643 ret = wl1271_ps_elp_wakeup(wl);
2647 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2648 wlvif->bss_type == BSS_TYPE_IBSS) {
2649 if (wl12xx_dev_role_started(wlvif))
2650 wl12xx_stop_dev(wl, wlvif);
2653 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2657 wl1271_ps_elp_sleep(wl);
2660 wl12xx_tx_reset_wlvif(wl, wlvif);
2662 /* clear all hlids (except system_hlid) */
2663 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2665 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2666 wlvif->bss_type == BSS_TYPE_IBSS) {
2667 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2668 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2669 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2670 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2671 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2673 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2674 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2675 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2676 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2677 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2678 wl12xx_free_rate_policy(wl,
2679 &wlvif->ap.ucast_rate_idx[i]);
2680 wl1271_free_ap_keys(wl, wlvif);
2683 dev_kfree_skb(wlvif->probereq);
2684 wlvif->probereq = NULL;
2685 if (wl->last_wlvif == wlvif)
2686 wl->last_wlvif = NULL;
2687 list_del(&wlvif->list);
2688 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2689 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2690 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2698 * Last AP, have more stations. Configure sleep auth according to STA.
2699 * Don't do this on unintended recovery.
2701 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2702 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2705 if (wl->ap_count == 0 && is_ap) {
2706 /* mask ap events */
2707 wl->event_mask &= ~wl->ap_event_mask;
2708 wl1271_event_unmask(wl);
2711 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2712 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2713 /* Configure for power according to debugfs */
2714 if (sta_auth != WL1271_PSM_ILLEGAL)
2715 wl1271_acx_sleep_auth(wl, sta_auth);
2716 /* Configure for ELP power saving */
2718 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex: the cancelled work items themselves take it */
2722 mutex_unlock(&wl->mutex);
2724 del_timer_sync(&wlvif->rx_streaming_timer);
2725 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2726 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2727 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2728 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2729 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2731 mutex_lock(&wl->mutex);
2734 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2735 struct ieee80211_vif *vif)
2737 struct wl1271 *wl = hw->priv;
2738 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2739 struct wl12xx_vif *iter;
2740 struct vif_counter_data vif_count;
2742 wl12xx_get_vif_count(hw, vif, &vif_count);
2743 mutex_lock(&wl->mutex);
2745 if (wl->state == WLCORE_STATE_OFF ||
2746 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2750 * wl->vif can be null here if someone shuts down the interface
2751 * just when hardware recovery has been started.
2753 wl12xx_for_each_wlvif(wl, iter) {
2757 __wl1271_op_remove_interface(wl, vif, true);
2760 WARN_ON(iter != wlvif);
2761 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2762 wl12xx_force_active_psm(wl);
2763 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2764 wl12xx_queue_recovery_work(wl);
2767 mutex_unlock(&wl->mutex);
2770 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2771 struct ieee80211_vif *vif,
2772 enum nl80211_iftype new_type, bool p2p)
2774 struct wl1271 *wl = hw->priv;
2777 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2778 wl1271_op_remove_interface(hw, vif);
2780 vif->type = new_type;
2782 ret = wl1271_op_add_interface(hw, vif);
2784 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2788 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2791 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2794 * One of the side effects of the JOIN command is that is clears
2795 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2796 * to a WPA/WPA2 access point will therefore kill the data-path.
2797 * Currently the only valid scenario for JOIN during association
2798 * is on roaming, in which case we will also be given new keys.
2799 * Keep the below message for now, unless it starts bothering
2800 * users who really like to roam a lot :)
2802 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2803 wl1271_info("JOIN while associated.");
2805 /* clear encryption type */
2806 wlvif->encryption_type = KEY_NONE;
2809 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2811 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2813 * TODO: this is an ugly workaround for wl12xx fw
2814 * bug - we are not able to tx/rx after the first
2815 * start_sta, so make dummy start+stop calls,
2816 * and then call start_sta again.
2817 * this should be fixed in the fw.
2819 wl12xx_cmd_role_start_sta(wl, wlvif);
2820 wl12xx_cmd_role_stop_sta(wl, wlvif);
2823 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2829 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2833 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2837 wl1271_error("No SSID in IEs!");
2842 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2843 wl1271_error("SSID is too long!");
2847 wlvif->ssid_len = ssid_len;
2848 memcpy(wlvif->ssid, ptr+2, ssid_len);
2852 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2854 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2855 struct sk_buff *skb;
2858 /* we currently only support setting the ssid from the ap probe req */
2859 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2862 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2866 ieoffset = offsetof(struct ieee80211_mgmt,
2867 u.probe_req.variable);
2868 wl1271_ssid_set(wlvif, skb, ieoffset);
2874 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2875 struct ieee80211_bss_conf *bss_conf,
2881 wlvif->aid = bss_conf->aid;
2882 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2883 wlvif->beacon_int = bss_conf->beacon_int;
2884 wlvif->wmm_enabled = bss_conf->qos;
2886 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2889 * with wl1271, we don't need to update the
2890 * beacon_int and dtim_period, because the firmware
2891 * updates it by itself when the first beacon is
2892 * received after a join.
2894 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2899 * Get a template for hardware connection maintenance
2901 dev_kfree_skb(wlvif->probereq);
2902 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2905 ieoffset = offsetof(struct ieee80211_mgmt,
2906 u.probe_req.variable);
2907 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2909 /* enable the connection monitoring feature */
2910 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2915 * The join command disable the keep-alive mode, shut down its process,
2916 * and also clear the template config, so we need to reset it all after
2917 * the join. The acx_aid starts the keep-alive process, and the order
2918 * of the commands below is relevant.
2920 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2924 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2928 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2932 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2933 wlvif->sta.klv_template_id,
2934 ACX_KEEP_ALIVE_TPL_VALID);
2939 * The default fw psm configuration is AUTO, while mac80211 default
2940 * setting is off (ACTIVE), so sync the fw with the correct value.
2942 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2948 wl1271_tx_enabled_rates_get(wl,
2951 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2959 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2962 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2964 /* make sure we are connected (sta) joined */
2966 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2969 /* make sure we are joined (ibss) */
2971 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2975 /* use defaults when not associated */
2978 /* free probe-request template */
2979 dev_kfree_skb(wlvif->probereq);
2980 wlvif->probereq = NULL;
2982 /* disable connection monitor features */
2983 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2987 /* Disable the keep-alive feature */
2988 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2992 /* disable beacon filtering */
2993 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2998 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2999 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3001 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3002 ieee80211_chswitch_done(vif, false);
3003 cancel_delayed_work(&wlvif->channel_switch_work);
3006 /* invalidate keep-alive template */
3007 wl1271_acx_keep_alive_config(wl, wlvif,
3008 wlvif->sta.klv_template_id,
3009 ACX_KEEP_ALIVE_TPL_INVALID);
3014 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3016 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3017 wlvif->rate_set = wlvif->basic_rate_set;
3020 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3023 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3025 if (idle == cur_idle)
3029 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3031 /* The current firmware only supports sched_scan in idle */
3032 if (wl->sched_vif == wlvif)
3033 wl->ops->sched_scan_stop(wl, wlvif);
3035 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3039 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3040 struct ieee80211_conf *conf, u32 changed)
3044 if (conf->power_level != wlvif->power_level) {
3045 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3049 wlvif->power_level = conf->power_level;
3055 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3057 struct wl1271 *wl = hw->priv;
3058 struct wl12xx_vif *wlvif;
3059 struct ieee80211_conf *conf = &hw->conf;
3062 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3064 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3066 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3069 mutex_lock(&wl->mutex);
3071 if (changed & IEEE80211_CONF_CHANGE_POWER)
3072 wl->power_level = conf->power_level;
3074 if (unlikely(wl->state != WLCORE_STATE_ON))
3077 ret = wl1271_ps_elp_wakeup(wl);
3081 /* configure each interface */
3082 wl12xx_for_each_wlvif(wl, wlvif) {
3083 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3089 wl1271_ps_elp_sleep(wl);
3092 mutex_unlock(&wl->mutex);
3097 struct wl1271_filter_params {
3100 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3103 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3104 struct netdev_hw_addr_list *mc_list)
3106 struct wl1271_filter_params *fp;
3107 struct netdev_hw_addr *ha;
3109 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3111 wl1271_error("Out of memory setting filters.");
3115 /* update multicast filtering parameters */
3116 fp->mc_list_length = 0;
3117 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3118 fp->enabled = false;
3121 netdev_hw_addr_list_for_each(ha, mc_list) {
3122 memcpy(fp->mc_list[fp->mc_list_length],
3123 ha->addr, ETH_ALEN);
3124 fp->mc_list_length++;
3128 return (u64)(unsigned long)fp;
3131 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3134 FIF_BCN_PRBRESP_PROMISC | \
3138 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3139 unsigned int changed,
3140 unsigned int *total, u64 multicast)
3142 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3143 struct wl1271 *wl = hw->priv;
3144 struct wl12xx_vif *wlvif;
3148 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3149 " total %x", changed, *total);
3151 mutex_lock(&wl->mutex);
3153 *total &= WL1271_SUPPORTED_FILTERS;
3154 changed &= WL1271_SUPPORTED_FILTERS;
3156 if (unlikely(wl->state != WLCORE_STATE_ON))
3159 ret = wl1271_ps_elp_wakeup(wl);
3163 wl12xx_for_each_wlvif(wl, wlvif) {
3164 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3165 if (*total & FIF_ALLMULTI)
3166 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3170 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3173 fp->mc_list_length);
3180 * the fw doesn't provide an api to configure the filters. instead,
3181 * the filters configuration is based on the active roles / ROC
3186 wl1271_ps_elp_sleep(wl);
3189 mutex_unlock(&wl->mutex);
3193 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3194 u8 id, u8 key_type, u8 key_size,
3195 const u8 *key, u8 hlid, u32 tx_seq_32,
3198 struct wl1271_ap_key *ap_key;
3201 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3203 if (key_size > MAX_KEY_SIZE)
3207 * Find next free entry in ap_keys. Also check we are not replacing
3210 for (i = 0; i < MAX_NUM_KEYS; i++) {
3211 if (wlvif->ap.recorded_keys[i] == NULL)
3214 if (wlvif->ap.recorded_keys[i]->id == id) {
3215 wl1271_warning("trying to record key replacement");
3220 if (i == MAX_NUM_KEYS)
3223 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3228 ap_key->key_type = key_type;
3229 ap_key->key_size = key_size;
3230 memcpy(ap_key->key, key, key_size);
3231 ap_key->hlid = hlid;
3232 ap_key->tx_seq_32 = tx_seq_32;
3233 ap_key->tx_seq_16 = tx_seq_16;
3235 wlvif->ap.recorded_keys[i] = ap_key;
3239 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3243 for (i = 0; i < MAX_NUM_KEYS; i++) {
3244 kfree(wlvif->ap.recorded_keys[i]);
3245 wlvif->ap.recorded_keys[i] = NULL;
3249 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3252 struct wl1271_ap_key *key;
3253 bool wep_key_added = false;
3255 for (i = 0; i < MAX_NUM_KEYS; i++) {
3257 if (wlvif->ap.recorded_keys[i] == NULL)
3260 key = wlvif->ap.recorded_keys[i];
3262 if (hlid == WL12XX_INVALID_LINK_ID)
3263 hlid = wlvif->ap.bcast_hlid;
3265 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3266 key->id, key->key_type,
3267 key->key_size, key->key,
3268 hlid, key->tx_seq_32,
3273 if (key->key_type == KEY_WEP)
3274 wep_key_added = true;
3277 if (wep_key_added) {
3278 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3279 wlvif->ap.bcast_hlid);
3285 wl1271_free_ap_keys(wl, wlvif);
3289 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3290 u16 action, u8 id, u8 key_type,
3291 u8 key_size, const u8 *key, u32 tx_seq_32,
3292 u16 tx_seq_16, struct ieee80211_sta *sta)
3295 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3298 struct wl1271_station *wl_sta;
3302 wl_sta = (struct wl1271_station *)sta->drv_priv;
3303 hlid = wl_sta->hlid;
3305 hlid = wlvif->ap.bcast_hlid;
3308 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3310 * We do not support removing keys after AP shutdown.
3311 * Pretend we do to make mac80211 happy.
3313 if (action != KEY_ADD_OR_REPLACE)
3316 ret = wl1271_record_ap_key(wl, wlvif, id,
3318 key, hlid, tx_seq_32,
3321 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3322 id, key_type, key_size,
3323 key, hlid, tx_seq_32,
3331 static const u8 bcast_addr[ETH_ALEN] = {
3332 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3335 addr = sta ? sta->addr : bcast_addr;
3337 if (is_zero_ether_addr(addr)) {
3338 /* We dont support TX only encryption */
3342 /* The wl1271 does not allow to remove unicast keys - they
3343 will be cleared automatically on next CMD_JOIN. Ignore the
3344 request silently, as we dont want the mac80211 to emit
3345 an error message. */
3346 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3349 /* don't remove key if hlid was already deleted */
3350 if (action == KEY_REMOVE &&
3351 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3354 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3355 id, key_type, key_size,
3356 key, addr, tx_seq_32,
3366 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3367 struct ieee80211_vif *vif,
3368 struct ieee80211_sta *sta,
3369 struct ieee80211_key_conf *key_conf)
3371 struct wl1271 *wl = hw->priv;
3373 bool might_change_spare =
3374 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3375 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3377 if (might_change_spare) {
3379 * stop the queues and flush to ensure the next packets are
3380 * in sync with FW spare block accounting
3382 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3383 wl1271_tx_flush(wl);
3386 mutex_lock(&wl->mutex);
3388 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3390 goto out_wake_queues;
3393 ret = wl1271_ps_elp_wakeup(wl);
3395 goto out_wake_queues;
3397 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3399 wl1271_ps_elp_sleep(wl);
3402 if (might_change_spare)
3403 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3405 mutex_unlock(&wl->mutex);
3410 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3411 struct ieee80211_vif *vif,
3412 struct ieee80211_sta *sta,
3413 struct ieee80211_key_conf *key_conf)
3415 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3422 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3424 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3425 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3426 key_conf->cipher, key_conf->keyidx,
3427 key_conf->keylen, key_conf->flags);
3428 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3430 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3432 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3433 hlid = wl_sta->hlid;
3435 hlid = wlvif->ap.bcast_hlid;
3438 hlid = wlvif->sta.hlid;
3440 if (hlid != WL12XX_INVALID_LINK_ID) {
3441 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3442 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3443 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3446 switch (key_conf->cipher) {
3447 case WLAN_CIPHER_SUITE_WEP40:
3448 case WLAN_CIPHER_SUITE_WEP104:
3451 key_conf->hw_key_idx = key_conf->keyidx;
3453 case WLAN_CIPHER_SUITE_TKIP:
3454 key_type = KEY_TKIP;
3455 key_conf->hw_key_idx = key_conf->keyidx;
3457 case WLAN_CIPHER_SUITE_CCMP:
3459 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3461 case WL1271_CIPHER_SUITE_GEM:
3465 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3472 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3473 key_conf->keyidx, key_type,
3474 key_conf->keylen, key_conf->key,
3475 tx_seq_32, tx_seq_16, sta);
3477 wl1271_error("Could not add or replace key");
3482 * reconfiguring arp response if the unicast (or common)
3483 * encryption key type was changed
3485 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3486 (sta || key_type == KEY_WEP) &&
3487 wlvif->encryption_type != key_type) {
3488 wlvif->encryption_type = key_type;
3489 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3491 wl1271_warning("build arp rsp failed: %d", ret);
3498 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3499 key_conf->keyidx, key_type,
3500 key_conf->keylen, key_conf->key,
3503 wl1271_error("Could not remove key");
3509 wl1271_error("Unsupported key cmd 0x%x", cmd);
3515 EXPORT_SYMBOL_GPL(wlcore_set_key);
3517 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3518 struct ieee80211_vif *vif,
3521 struct wl1271 *wl = hw->priv;
3522 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3525 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3528 /* we don't handle unsetting of default key */
3532 mutex_lock(&wl->mutex);
3534 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3539 ret = wl1271_ps_elp_wakeup(wl);
3543 wlvif->default_key = key_idx;
3545 /* the default WEP key needs to be configured at least once */
3546 if (wlvif->encryption_type == KEY_WEP) {
3547 ret = wl12xx_cmd_set_default_wep_key(wl,
3555 wl1271_ps_elp_sleep(wl);
3558 mutex_unlock(&wl->mutex);
3561 void wlcore_regdomain_config(struct wl1271 *wl)
3565 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3568 mutex_lock(&wl->mutex);
3570 if (unlikely(wl->state != WLCORE_STATE_ON))
3573 ret = wl1271_ps_elp_wakeup(wl);
3577 ret = wlcore_cmd_regdomain_config_locked(wl);
3579 wl12xx_queue_recovery_work(wl);
3583 wl1271_ps_elp_sleep(wl);
3585 mutex_unlock(&wl->mutex);
3588 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3589 struct ieee80211_vif *vif,
3590 struct ieee80211_scan_request *hw_req)
3592 struct cfg80211_scan_request *req = &hw_req->req;
3593 struct wl1271 *wl = hw->priv;
3598 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3601 ssid = req->ssids[0].ssid;
3602 len = req->ssids[0].ssid_len;
3605 mutex_lock(&wl->mutex);
3607 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3609 * We cannot return -EBUSY here because cfg80211 will expect
3610 * a call to ieee80211_scan_completed if we do - in this case
3611 * there won't be any call.
3617 ret = wl1271_ps_elp_wakeup(wl);
3621 /* fail if there is any role in ROC */
3622 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3623 /* don't allow scanning right now */
3628 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3630 wl1271_ps_elp_sleep(wl);
3632 mutex_unlock(&wl->mutex);
3637 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3638 struct ieee80211_vif *vif)
3640 struct wl1271 *wl = hw->priv;
3641 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3644 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3646 mutex_lock(&wl->mutex);
3648 if (unlikely(wl->state != WLCORE_STATE_ON))
3651 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3654 ret = wl1271_ps_elp_wakeup(wl);
3658 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3659 ret = wl->ops->scan_stop(wl, wlvif);
3665 * Rearm the tx watchdog just before idling scan. This
3666 * prevents just-finished scans from triggering the watchdog
3668 wl12xx_rearm_tx_watchdog_locked(wl);
3670 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3671 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3672 wl->scan_wlvif = NULL;
3673 wl->scan.req = NULL;
3674 ieee80211_scan_completed(wl->hw, true);
3677 wl1271_ps_elp_sleep(wl);
3679 mutex_unlock(&wl->mutex);
3681 cancel_delayed_work_sync(&wl->scan_complete_work);
3684 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3685 struct ieee80211_vif *vif,
3686 struct cfg80211_sched_scan_request *req,
3687 struct ieee80211_scan_ies *ies)
3689 struct wl1271 *wl = hw->priv;
3690 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3693 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3695 mutex_lock(&wl->mutex);
3697 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3702 ret = wl1271_ps_elp_wakeup(wl);
3706 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3710 wl->sched_vif = wlvif;
3713 wl1271_ps_elp_sleep(wl);
3715 mutex_unlock(&wl->mutex);
3719 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3720 struct ieee80211_vif *vif)
3722 struct wl1271 *wl = hw->priv;
3723 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3726 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3728 mutex_lock(&wl->mutex);
3730 if (unlikely(wl->state != WLCORE_STATE_ON))
3733 ret = wl1271_ps_elp_wakeup(wl);
3737 wl->ops->sched_scan_stop(wl, wlvif);
3739 wl1271_ps_elp_sleep(wl);
3741 mutex_unlock(&wl->mutex);
3746 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3748 struct wl1271 *wl = hw->priv;
3751 mutex_lock(&wl->mutex);
3753 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3758 ret = wl1271_ps_elp_wakeup(wl);
3762 ret = wl1271_acx_frag_threshold(wl, value);
3764 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3766 wl1271_ps_elp_sleep(wl);
3769 mutex_unlock(&wl->mutex);
3774 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3776 struct wl1271 *wl = hw->priv;
3777 struct wl12xx_vif *wlvif;
3780 mutex_lock(&wl->mutex);
3782 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3787 ret = wl1271_ps_elp_wakeup(wl);
3791 wl12xx_for_each_wlvif(wl, wlvif) {
3792 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3794 wl1271_warning("set rts threshold failed: %d", ret);
3796 wl1271_ps_elp_sleep(wl);
3799 mutex_unlock(&wl->mutex);
3804 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3807 const u8 *next, *end = skb->data + skb->len;
3808 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3809 skb->len - ieoffset);
3814 memmove(ie, next, end - next);
3815 skb_trim(skb, skb->len - len);
3818 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3819 unsigned int oui, u8 oui_type,
3823 const u8 *next, *end = skb->data + skb->len;
3824 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3825 skb->data + ieoffset,
3826 skb->len - ieoffset);
3831 memmove(ie, next, end - next);
3832 skb_trim(skb, skb->len - len);
3835 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3836 struct ieee80211_vif *vif)
3838 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3839 struct sk_buff *skb;
3842 skb = ieee80211_proberesp_get(wl->hw, vif);
3846 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3847 CMD_TEMPL_AP_PROBE_RESPONSE,
3856 wl1271_debug(DEBUG_AP, "probe response updated");
3857 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3863 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3864 struct ieee80211_vif *vif,
3866 size_t probe_rsp_len,
3869 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3870 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3871 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3872 int ssid_ie_offset, ie_offset, templ_len;
3875 /* no need to change probe response if the SSID is set correctly */
3876 if (wlvif->ssid_len > 0)
3877 return wl1271_cmd_template_set(wl, wlvif->role_id,
3878 CMD_TEMPL_AP_PROBE_RESPONSE,
3883 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3884 wl1271_error("probe_rsp template too big");
3888 /* start searching from IE offset */
3889 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3891 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3892 probe_rsp_len - ie_offset);
3894 wl1271_error("No SSID in beacon!");
3898 ssid_ie_offset = ptr - probe_rsp_data;
3899 ptr += (ptr[1] + 2);
3901 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3903 /* insert SSID from bss_conf */
3904 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3905 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3906 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3907 bss_conf->ssid, bss_conf->ssid_len);
3908 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3910 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3911 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3912 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3914 return wl1271_cmd_template_set(wl, wlvif->role_id,
3915 CMD_TEMPL_AP_PROBE_RESPONSE,
3921 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3922 struct ieee80211_vif *vif,
3923 struct ieee80211_bss_conf *bss_conf,
3926 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3929 if (changed & BSS_CHANGED_ERP_SLOT) {
3930 if (bss_conf->use_short_slot)
3931 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3933 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3935 wl1271_warning("Set slot time failed %d", ret);
3940 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3941 if (bss_conf->use_short_preamble)
3942 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3944 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3947 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3948 if (bss_conf->use_cts_prot)
3949 ret = wl1271_acx_cts_protect(wl, wlvif,
3952 ret = wl1271_acx_cts_protect(wl, wlvif,
3953 CTSPROTECT_DISABLE);
3955 wl1271_warning("Set ctsprotect failed %d", ret);
3964 static int wlcore_set_beacon_template(struct wl1271 *wl,
3965 struct ieee80211_vif *vif,
3968 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3969 struct ieee80211_hdr *hdr;
3972 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3973 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3981 wl1271_debug(DEBUG_MASTER, "beacon updated");
3983 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3985 dev_kfree_skb(beacon);
3988 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3989 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3991 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3996 dev_kfree_skb(beacon);
4000 wlvif->wmm_enabled =
4001 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4002 WLAN_OUI_TYPE_MICROSOFT_WMM,
4003 beacon->data + ieoffset,
4004 beacon->len - ieoffset);
4007 * In case we already have a probe-resp beacon set explicitly
4008 * by usermode, don't use the beacon data.
4010 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4013 /* remove TIM ie from probe response */
4014 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4017 * remove p2p ie from probe response.
4018 * the fw reponds to probe requests that don't include
4019 * the p2p ie. probe requests with p2p ie will be passed,
4020 * and will be responded by the supplicant (the spec
4021 * forbids including the p2p ie when responding to probe
4022 * requests that didn't include it).
4024 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4025 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4027 hdr = (struct ieee80211_hdr *) beacon->data;
4028 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4029 IEEE80211_STYPE_PROBE_RESP);
4031 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4036 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4037 CMD_TEMPL_PROBE_RESPONSE,
4042 dev_kfree_skb(beacon);
4050 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4051 struct ieee80211_vif *vif,
4052 struct ieee80211_bss_conf *bss_conf,
4055 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4056 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4059 if (changed & BSS_CHANGED_BEACON_INT) {
4060 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4061 bss_conf->beacon_int);
4063 wlvif->beacon_int = bss_conf->beacon_int;
4066 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4067 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4069 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4072 if (changed & BSS_CHANGED_BEACON) {
4073 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4080 wl1271_error("beacon info change failed: %d", ret);
4084 /* AP mode changes */
4085 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4086 struct ieee80211_vif *vif,
4087 struct ieee80211_bss_conf *bss_conf,
4090 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4093 if (changed & BSS_CHANGED_BASIC_RATES) {
4094 u32 rates = bss_conf->basic_rates;
4096 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4098 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4099 wlvif->basic_rate_set);
4101 ret = wl1271_init_ap_rates(wl, wlvif);
4103 wl1271_error("AP rate policy change failed %d", ret);
4107 ret = wl1271_ap_init_templates(wl, vif);
4111 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4115 ret = wlcore_set_beacon_template(wl, vif, true);
4120 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4124 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4125 if (bss_conf->enable_beacon) {
4126 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4127 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4131 ret = wl1271_ap_init_hwenc(wl, wlvif);
4135 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4136 wl1271_debug(DEBUG_AP, "started AP");
4139 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4141 * AP might be in ROC in case we have just
4142 * sent auth reply. handle it.
4144 if (test_bit(wlvif->role_id, wl->roc_map))
4145 wl12xx_croc(wl, wlvif->role_id);
4147 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4151 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4152 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4154 wl1271_debug(DEBUG_AP, "stopped AP");
4159 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4163 /* Handle HT information change */
4164 if ((changed & BSS_CHANGED_HT) &&
4165 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4166 ret = wl1271_acx_set_ht_information(wl, wlvif,
4167 bss_conf->ht_operation_mode);
4169 wl1271_warning("Set ht information failed %d", ret);
4178 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4179 struct ieee80211_bss_conf *bss_conf,
4185 wl1271_debug(DEBUG_MAC80211,
4186 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4187 bss_conf->bssid, bss_conf->aid,
4188 bss_conf->beacon_int,
4189 bss_conf->basic_rates, sta_rate_set);
4191 wlvif->beacon_int = bss_conf->beacon_int;
4192 rates = bss_conf->basic_rates;
4193 wlvif->basic_rate_set =
4194 wl1271_tx_enabled_rates_get(wl, rates,
4197 wl1271_tx_min_rate_get(wl,
4198 wlvif->basic_rate_set);
4202 wl1271_tx_enabled_rates_get(wl,
4206 /* we only support sched_scan while not connected */
4207 if (wl->sched_vif == wlvif)
4208 wl->ops->sched_scan_stop(wl, wlvif);
4210 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4214 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4218 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4222 wlcore_set_ssid(wl, wlvif);
4224 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4229 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4233 /* revert back to minimum rates for the current band */
4234 wl1271_set_band_rate(wl, wlvif);
4235 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4237 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4241 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4242 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4243 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4248 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4251 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS-config changes for STA and IBSS interfaces:
 * IBSS join/leave, idle, CQM thresholds, BSSID set/clear, beacon
 * filtering, ERP settings, association state, power-save mode, HT
 * capabilities/information and ARP filtering.  Called with wl->mutex
 * held and the chip awake (see wl1271_op_bss_info_changed below).
 */
4252 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4253 struct ieee80211_vif *vif,
4254 struct ieee80211_bss_conf *bss_conf,
4257 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4258 bool do_join = false;
4259 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4260 bool ibss_joined = false;
4261 u32 sta_rate_set = 0;
4263 struct ieee80211_sta *sta;
4264 bool sta_exists = false;
4265 struct ieee80211_sta_ht_cap sta_ht_cap;
4268 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: on leave also drop association state and stop the role */
4274 if (changed & BSS_CHANGED_IBSS) {
4275 if (bss_conf->ibss_joined) {
4276 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4279 wlcore_unset_assoc(wl, wlvif);
4280 wl12xx_cmd_role_stop_sta(wl, wlvif);
4284 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4287 /* Need to update the SSID (for filtering etc) */
4288 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4291 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4292 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4293 bss_conf->enable_beacon ? "enabled" : "disabled");
4298 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4299 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: arm/disarm the RSSI trigger in FW */
4301 if (changed & BSS_CHANGED_CQM) {
4302 bool enable = false;
4303 if (bss_conf->cqm_rssi_thold)
4305 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4306 bss_conf->cqm_rssi_thold,
4307 bss_conf->cqm_rssi_hyst);
4310 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's rate set and HT caps under RCU for later use */
4313 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4314 BSS_CHANGED_ASSOC)) {
4316 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4318 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4320 /* save the supp_rates of the ap */
4321 sta_rate_set = sta->supp_rates[wlvif->band];
4322 if (sta->ht_cap.ht_supported)
4324 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4325 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4326 sta_ht_cap = sta->ht_cap;
4333 if (changed & BSS_CHANGED_BSSID) {
4334 if (!is_zero_ether_addr(bss_conf->bssid)) {
4335 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4340 /* Need to update the BSSID (for filtering etc) */
4343 ret = wlcore_clear_bssid(wl, wlvif);
4349 if (changed & BSS_CHANGED_IBSS) {
4350 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4351 bss_conf->ibss_joined);
4353 if (bss_conf->ibss_joined) {
4354 u32 rates = bss_conf->basic_rates;
4355 wlvif->basic_rate_set =
4356 wl1271_tx_enabled_rates_get(wl, rates,
4359 wl1271_tx_min_rate_get(wl,
4360 wlvif->basic_rate_set);
4362 /* by default, use 11b + OFDM rates */
4363 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4364 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* DTIM period became known: beacon filtering can now be enabled */
4370 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4371 /* enable beacon filtering */
4372 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4377 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4382 ret = wlcore_join(wl, wlvif);
4384 wl1271_warning("cmd join failed %d", ret);
4389 if (changed & BSS_CHANGED_ASSOC) {
4390 if (bss_conf->assoc) {
4391 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* if mac80211 authorized us before assoc completed, resend the state */
4396 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4397 wl12xx_set_authorized(wl, wlvif);
4399 wlcore_unset_assoc(wl, wlvif);
/* enter/leave power save; forced vs. auto PS comes from the conf file */
4403 if (changed & BSS_CHANGED_PS) {
4404 if ((bss_conf->ps) &&
4405 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4406 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4410 if (wl->conf.conn.forced_ps) {
4411 ps_mode = STATION_POWER_SAVE_MODE;
4412 ps_mode_str = "forced";
4414 ps_mode = STATION_AUTO_PS_MODE;
4415 ps_mode_str = "auto";
4418 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4420 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4422 wl1271_warning("enter %s ps failed %d",
4424 } else if (!bss_conf->ps &&
4425 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4426 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4428 ret = wl1271_ps_set_mode(wl, wlvif,
4429 STATION_ACTIVE_MODE);
4431 wl1271_warning("exit auto ps failed %d", ret);
4435 /* Handle new association with HT. Do this after join. */
4438 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4440 ret = wlcore_hw_set_peer_cap(wl,
4446 wl1271_warning("Set ht cap failed %d", ret);
4452 ret = wl1271_acx_set_ht_information(wl, wlvif,
4453 bss_conf->ht_operation_mode);
4455 wl1271_warning("Set ht information failed %d",
4462 /* Handle arp filtering. Done after join. */
4463 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4464 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4465 __be32 addr = bss_conf->arp_addr_list[0];
4466 wlvif->sta.qos = bss_conf->qos;
4467 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
/* exactly one IPv4 address and associated: enable FW auto-ARP reply */
4469 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4470 wlvif->ip_addr = addr;
4472 * The template should have been configured only upon
4473 * association. however, it seems that the correct ip
4474 * isn't being set (when sending), so we have to
4475 * reconfigure the template upon every ip change.
4477 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4479 wl1271_warning("build arp rsp failed: %d", ret);
4483 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4484 (ACX_ARP_FILTER_ARP_FILTERING |
4485 ACX_ARP_FILTER_AUTO_ARP),
4489 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback: take wl->mutex, wake the chip,
 * apply TX-power changes, then dispatch to the AP or STA/IBSS specific
 * handler depending on the interface type.
 */
4500 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4501 struct ieee80211_vif *vif,
4502 struct ieee80211_bss_conf *bss_conf,
4505 struct wl1271 *wl = hw->priv;
4506 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4507 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4510 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4511 wlvif->role_id, (int)changed);
4514 * make sure to cancel pending disconnections if our association
4517 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4518 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush queued frames before beaconing stops on an AP interface */
4520 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4521 !bss_conf->enable_beacon)
4522 wl1271_tx_flush(wl);
4524 mutex_lock(&wl->mutex);
4526 if (unlikely(wl->state != WLCORE_STATE_ON))
4529 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4532 ret = wl1271_ps_elp_wakeup(wl);
/* only program the FW when the requested power actually changed */
4536 if ((changed & BSS_CHANGED_TXPOWER) &&
4537 bss_conf->txpower != wlvif->power_level) {
4539 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4543 wlvif->power_level = bss_conf->txpower;
4547 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4549 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4551 wl1271_ps_elp_sleep(wl);
4554 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback: nothing to program in the FW here,
 * just log the new channel context.
 */
4557 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4558 struct ieee80211_chanctx_conf *ctx)
4560 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4561 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4562 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 .remove_chanctx callback: log only, no FW interaction. */
4566 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4567 struct ieee80211_chanctx_conf *ctx)
4569 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4570 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4571 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 .change_chanctx callback: log only, no FW interaction. */
4574 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4575 struct ieee80211_chanctx_conf *ctx,
4578 wl1271_debug(DEBUG_MAC80211,
4579 "mac80211 change chanctx %d (type %d) changed 0x%x",
4580 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4581 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 .assign_vif_chanctx callback: record the vif's band, channel
 * and channel type from the context, and refresh the per-band default
 * rates accordingly.  Protected by wl->mutex.
 */
4584 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4585 struct ieee80211_vif *vif,
4586 struct ieee80211_chanctx_conf *ctx)
4588 struct wl1271 *wl = hw->priv;
4589 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4590 int channel = ieee80211_frequency_to_channel(
4591 ctx->def.chan->center_freq);
4593 wl1271_debug(DEBUG_MAC80211,
4594 "mac80211 assign chanctx (role %d) %d (type %d)",
4595 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4597 mutex_lock(&wl->mutex);
4599 wlvif->band = ctx->def.chan->band;
4600 wlvif->channel = channel;
4601 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4603 /* update default rates according to the band */
4604 wl1271_set_band_rate(wl, wlvif);
4606 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback: flush pending TX so no frame
 * is left targeting the channel that is being released.
 */
4611 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4612 struct ieee80211_vif *vif,
4613 struct ieee80211_chanctx_conf *ctx)
4615 struct wl1271 *wl = hw->priv;
4616 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4618 wl1271_debug(DEBUG_MAC80211,
4619 "mac80211 unassign chanctx (role %d) %d (type %d)",
4621 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4622 cfg80211_get_chandef_type(&ctx->def));
4624 wl1271_tx_flush(wl);
/*
 * mac80211 .conf_tx callback: program the EDCA parameters (cw_min,
 * cw_max, aifs, txop) and the TID configuration for one AC queue into
 * the FW.  The PS scheme is UPSD-trigger or legacy depending on the
 * queue's uapsd setting (selection lines elided in this extract).
 */
4627 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4628 struct ieee80211_vif *vif, u16 queue,
4629 const struct ieee80211_tx_queue_params *params)
4631 struct wl1271 *wl = hw->priv;
4632 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4636 mutex_lock(&wl->mutex);
4638 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4641 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4643 ps_scheme = CONF_PS_SCHEME_LEGACY;
4645 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4648 ret = wl1271_ps_elp_wakeup(wl);
4653 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 conversion below when handing txop to the FW */
4656 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4657 params->cw_min, params->cw_max,
4658 params->aifs, params->txop << 5);
4662 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4663 CONF_CHANNEL_TYPE_EDCF,
4664 wl1271_tx_get_queue(queue),
4665 ps_scheme, CONF_ACK_POLICY_LEGACY,
4669 wl1271_ps_elp_sleep(wl);
4672 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: query the FW for the current TSF value.
 * Returns ULLONG_MAX (the initial value of mactime) if the chip is not
 * on or the ACX query does not complete.
 */
4677 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4678 struct ieee80211_vif *vif)
4681 struct wl1271 *wl = hw->priv;
4682 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4683 u64 mactime = ULLONG_MAX;
4686 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4688 mutex_lock(&wl->mutex);
4690 if (unlikely(wl->state != WLCORE_STATE_ON))
4693 ret = wl1271_ps_elp_wakeup(wl);
4697 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4702 wl1271_ps_elp_sleep(wl);
4705 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback: report only the current channel from
 * hw->conf; no per-channel statistics are collected from the FW.
 */
4709 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4710 struct survey_info *survey)
4712 struct ieee80211_conf *conf = &hw->conf;
4717 survey->channel = conf->chandef.chan;
/*
 * Allocate an HLID (host link ID) for a new AP-mode station: enforce
 * the max-stations limit, grab a link, restore the station's saved
 * security sequence counter (for recovery/resume), record the MAC
 * address and bump the active station count.
 */
4722 static int wl1271_allocate_sta(struct wl1271 *wl,
4723 struct wl12xx_vif *wlvif,
4724 struct ieee80211_sta *sta)
4726 struct wl1271_station *wl_sta;
4730 if (wl->active_sta_count >= wl->max_ap_stations) {
4731 wl1271_warning("could not allocate HLID - too much stations");
4735 wl_sta = (struct wl1271_station *)sta->drv_priv;
4736 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4738 wl1271_warning("could not allocate HLID - too many links");
4742 /* use the previous security seq, if this is a recovery/resume */
4743 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4745 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4746 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4747 wl->active_sta_count++;
/*
 * Release the HLID of an AP-mode station: clear the hlid from the maps,
 * save the station's last packet number (so a later recovery/resume can
 * restore it), free the link and decrement the active station count.
 * Re-arms the TX watchdog when the last station goes away.
 */
4751 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4753 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4756 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4757 __clear_bit(hlid, &wl->ap_ps_map);
4758 __clear_bit(hlid, &wl->ap_fw_ps_map);
4761 * save the last used PN in the private part of ieee80211_sta,
4762 * in case of recovery/suspend
4764 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4766 wl12xx_free_link(wl, wlvif, &hlid);
4767 wl->active_sta_count--;
4770 * rearm the tx watchdog when the last STA is freed - give the FW a
4771 * chance to return STA-buffered packets before complaining.
4773 if (wl->active_sta_count == 0)
4774 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * AP mode: allocate an HLID for the new station and announce it to the
 * FW with an ADD_PEER command; the HLID is freed again if the FW
 * command fails.
 */
4777 static int wl12xx_sta_add(struct wl1271 *wl,
4778 struct wl12xx_vif *wlvif,
4779 struct ieee80211_sta *sta)
4781 struct wl1271_station *wl_sta;
4785 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4787 ret = wl1271_allocate_sta(wl, wlvif, sta);
4791 wl_sta = (struct wl1271_station *)sta->drv_priv;
4792 hlid = wl_sta->hlid;
4794 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* roll back the HLID allocation if the FW rejected the peer */
4796 wl1271_free_sta(wl, wlvif, hlid);
/*
 * AP mode: remove the station from the FW (REMOVE_PEER) and release its
 * HLID.  The WARN_ON guards against removing a station whose hlid was
 * never registered in this vif's map (id assignment elided in extract).
 */
4801 static int wl12xx_sta_remove(struct wl1271 *wl,
4802 struct wl12xx_vif *wlvif,
4803 struct ieee80211_sta *sta)
4805 struct wl1271_station *wl_sta;
4808 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4810 wl_sta = (struct wl1271_station *)sta->drv_priv;
4812 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4815 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4819 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel for this role, but only if no other role is
 * currently ROCing (only one ROC may be active at a time).
 */
4823 static void wlcore_roc_if_possible(struct wl1271 *wl,
4824 struct wl12xx_vif *wlvif)
4826 if (find_first_bit(wl->roc_map,
4827 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4830 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4833 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4837 * when wl_sta is NULL, we treat this call as if coming from a
4838 * pending auth reply.
4839 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are in the middle of connecting to our AP.  While
 * at least one connection (or a pending auth reply) is in progress, a
 * ROC is kept on the AP's role so the FW stays on-channel; the ROC is
 * cancelled once the last in-progress connection completes or fails.
 */
4842 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4843 struct wl1271_station *wl_sta, bool in_conn)
4846 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-progress connection: start the ROC if none is active */
4849 if (!wlvif->ap_pending_auth_reply &&
4850 !wlvif->inconn_count)
4851 wlcore_roc_if_possible(wl, wlvif);
4854 wl_sta->in_connection = true;
4855 wlvif->inconn_count++;
4857 wlvif->ap_pending_auth_reply = true;
4860 if (wl_sta && !wl_sta->in_connection)
4863 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4866 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4870 wl_sta->in_connection = false;
4871 wlvif->inconn_count--;
4873 wlvif->ap_pending_auth_reply = false;
/* last in-progress connection finished: cancel the ROC */
4876 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4877 test_bit(wlvif->role_id, wl->roc_map))
4878 wl12xx_croc(wl, wlvif->role_id);
/*
 * Apply a mac80211 station state transition to the FW.  AP mode handles
 * add/remove/authorize of peers (plus in-connection ROC tracking); STA
 * mode handles (de)authorization and saving/restoring the security
 * sequence counters across disassoc/assoc (suspend/resume).
 */
4882 static int wl12xx_update_sta_state(struct wl1271 *wl,
4883 struct wl12xx_vif *wlvif,
4884 struct ieee80211_sta *sta,
4885 enum ieee80211_sta_state old_state,
4886 enum ieee80211_sta_state new_state)
4888 struct wl1271_station *wl_sta;
4889 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4890 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4893 wl_sta = (struct wl1271_station *)sta->drv_priv;
4895 /* Add station (AP mode) */
4897 old_state == IEEE80211_STA_NOTEXIST &&
4898 new_state == IEEE80211_STA_NONE) {
4899 ret = wl12xx_sta_add(wl, wlvif, sta);
4903 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4906 /* Remove station (AP mode) */
4908 old_state == IEEE80211_STA_NONE &&
4909 new_state == IEEE80211_STA_NOTEXIST) {
4911 wl12xx_sta_remove(wl, wlvif, sta);
4913 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4916 /* Authorize station (AP mode) */
4918 new_state == IEEE80211_STA_AUTHORIZED) {
4919 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4923 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
/* connection established: the station no longer counts as in-progress */
4928 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4931 /* Authorize station */
4933 new_state == IEEE80211_STA_AUTHORIZED) {
4934 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4935 ret = wl12xx_set_authorized(wl, wlvif);
4941 old_state == IEEE80211_STA_AUTHORIZED &&
4942 new_state == IEEE80211_STA_ASSOC) {
4943 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4944 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4947 /* save seq number on disassoc (suspend) */
4949 old_state == IEEE80211_STA_ASSOC &&
4950 new_state == IEEE80211_STA_AUTH) {
4951 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
4952 wlvif->total_freed_pkts = 0;
4955 /* restore seq number on assoc (resume) */
4957 old_state == IEEE80211_STA_AUTH &&
4958 new_state == IEEE80211_STA_ASSOC) {
4959 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
4962 /* clear ROCs on failure or authorization */
4964 (new_state == IEEE80211_STA_AUTHORIZED ||
4965 new_state == IEEE80211_STA_NOTEXIST)) {
4966 if (test_bit(wlvif->role_id, wl->roc_map))
4967 wl12xx_croc(wl, wlvif->role_id);
/* new connection attempt: ROC on our channel if no ROC is active */
4971 old_state == IEEE80211_STA_NOTEXIST &&
4972 new_state == IEEE80211_STA_NONE) {
4973 if (find_first_bit(wl->roc_map,
4974 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4975 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4976 wl12xx_roc(wl, wlvif, wlvif->role_id,
4977 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback: wrap wl12xx_update_sta_state() with the
 * usual mutex/wakeup bracket.  On a downward transition (new_state <
 * old_state) errors are not propagated to mac80211.
 */
4983 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4984 struct ieee80211_vif *vif,
4985 struct ieee80211_sta *sta,
4986 enum ieee80211_sta_state old_state,
4987 enum ieee80211_sta_state new_state)
4989 struct wl1271 *wl = hw->priv;
4990 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4993 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4994 sta->aid, old_state, new_state);
4996 mutex_lock(&wl->mutex);
4998 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5003 ret = wl1271_ps_elp_wakeup(wl);
5007 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5009 wl1271_ps_elp_sleep(wl);
5011 mutex_unlock(&wl->mutex);
5012 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback.  Only RX block-ack sessions are
 * driven from the host (start/stop via ACX, tracked in the per-link
 * ba_bitmap and a global session count); TX BA sessions are handled
 * autonomously by the FW, so all TX actions are rejected here.
 */
5017 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5018 struct ieee80211_vif *vif,
5019 enum ieee80211_ampdu_mlme_action action,
5020 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5023 struct wl1271 *wl = hw->priv;
5024 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5026 u8 hlid, *ba_bitmap;
5028 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5031 /* sanity check - the fields in FW are only 8bits wide */
5032 if (WARN_ON(tid > 0xFF))
5035 mutex_lock(&wl->mutex);
5037 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the link (hlid) the BA session belongs to */
5042 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5043 hlid = wlvif->sta.hlid;
5044 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5045 struct wl1271_station *wl_sta;
5047 wl_sta = (struct wl1271_station *)sta->drv_priv;
5048 hlid = wl_sta->hlid;
5054 ba_bitmap = &wl->links[hlid].ba_bitmap;
5056 ret = wl1271_ps_elp_wakeup(wl);
5060 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5064 case IEEE80211_AMPDU_RX_START:
5065 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5070 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5072 wl1271_error("exceeded max RX BA sessions");
5076 if (*ba_bitmap & BIT(tid)) {
5078 wl1271_error("cannot enable RX BA session on active "
5083 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5086 *ba_bitmap |= BIT(tid);
5087 wl->ba_rx_session_count++;
5091 case IEEE80211_AMPDU_RX_STOP:
5092 if (!(*ba_bitmap & BIT(tid))) {
5094 * this happens on reconfig - so only output a debug
5095 * message for now, and don't fail the function.
5097 wl1271_debug(DEBUG_MAC80211,
5098 "no active RX BA session on tid: %d",
5104 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5107 *ba_bitmap &= ~BIT(tid);
5108 wl->ba_rx_session_count--;
5113 * BA initiator sessions are managed by the FW independently.
5114 * Fall through here on purpose for all TX AMPDU commands.
5116 case IEEE80211_AMPDU_TX_START:
5117 case IEEE80211_AMPDU_TX_STOP_CONT:
5118 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5119 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5120 case IEEE80211_AMPDU_TX_OPERATIONAL:
5125 wl1271_error("Incorrect ampdu action id=%x\n", action);
5129 wl1271_ps_elp_sleep(wl);
5132 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback: store the per-band legacy rate
 * masks, and if this is an unassociated STA, apply them immediately by
 * refreshing the band rates and re-sending the rate policies to the FW.
 */
5137 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5138 struct ieee80211_vif *vif,
5139 const struct cfg80211_bitrate_mask *mask)
5141 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5142 struct wl1271 *wl = hw->priv;
5145 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5146 mask->control[NL80211_BAND_2GHZ].legacy,
5147 mask->control[NL80211_BAND_5GHZ].legacy,
5149 mutex_lock(&wl->mutex);
/* remember the requested mask per band; used when rates are rebuilt */
5151 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5152 wlvif->bitrate_masks[i] =
5153 wl1271_tx_enabled_rates_get(wl,
5154 mask->control[i].legacy,
5157 if (unlikely(wl->state != WLCORE_STATE_ON))
/* only push to FW right away for a STA that is not yet associated */
5160 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5161 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5163 ret = wl1271_ps_elp_wakeup(wl);
5167 wl1271_set_band_rate(wl, wlvif);
5169 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5170 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5172 wl1271_ps_elp_sleep(wl);
5175 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback: flush TX, then hand the switch to
 * the lower driver.  A delayed work is armed to declare failure if the
 * switch has not completed 5 seconds after the expected switch time
 * (count * beacon interval).  If the chip is already off, the switch is
 * reported as failed immediately for an associated STA.
 */
5180 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5181 struct ieee80211_vif *vif,
5182 struct ieee80211_channel_switch *ch_switch)
5184 struct wl1271 *wl = hw->priv;
5185 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5188 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5190 wl1271_tx_flush(wl);
5192 mutex_lock(&wl->mutex);
5194 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5195 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5196 ieee80211_chswitch_done(vif, false);
5198 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5202 ret = wl1271_ps_elp_wakeup(wl);
5206 /* TODO: change mac80211 to pass vif as param */
5208 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5209 unsigned long delay_usec;
5211 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5215 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5217 /* indicate failure 5 seconds after channel switch time */
5218 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5220 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5221 usecs_to_jiffies(delay_usec) +
5222 msecs_to_jiffies(5000));
5226 wl1271_ps_elp_sleep(wl);
5229 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback: flush all queued TX frames.  The queues and
 * drop arguments are ignored; everything is flushed unconditionally.
 */
5232 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5233 u32 queues, bool drop)
5235 struct wl1271 *wl = hw->priv;
5237 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback: start the device role on the
 * requested channel and arm roc_complete_work to end the ROC after
 * "duration" ms.  Returns -EBUSY (via the WARN_ON branch) when another
 * ROC is already in flight, since only one ROC is supported at a time.
 */
5240 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5241 struct ieee80211_vif *vif,
5242 struct ieee80211_channel *chan,
5244 enum ieee80211_roc_type type)
5246 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5247 struct wl1271 *wl = hw->priv;
5248 int channel, ret = 0;
5250 channel = ieee80211_frequency_to_channel(chan->center_freq);
5252 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5253 channel, wlvif->role_id);
5255 mutex_lock(&wl->mutex);
5257 if (unlikely(wl->state != WLCORE_STATE_ON))
5260 /* return EBUSY if we can't ROC right now */
5261 if (WARN_ON(wl->roc_vif ||
5262 find_first_bit(wl->roc_map,
5263 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5268 ret = wl1271_ps_elp_wakeup(wl);
5272 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* schedule the ROC completion after the requested dwell time */
5277 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5278 msecs_to_jiffies(duration));
5280 wl1271_ps_elp_sleep(wl);
5282 mutex_unlock(&wl->mutex);
/*
 * Finish the current ROC: stop the device role of the ROCing vif.
 * Caller must hold wl->mutex and keep the FW awake.  Returns early if
 * no ROC is pending or the vif was already torn down.
 */
5286 static int __wlcore_roc_completed(struct wl1271 *wl)
5288 struct wl12xx_vif *wlvif;
5291 /* already completed */
5292 if (unlikely(!wl->roc_vif))
5295 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5297 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5300 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): take the mutex, wake
 * the chip, complete the ROC, then put the chip back to sleep.
 */
5309 static int wlcore_roc_completed(struct wl1271 *wl)
5313 wl1271_debug(DEBUG_MAC80211, "roc complete");
5315 mutex_lock(&wl->mutex);
5317 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5322 ret = wl1271_ps_elp_wakeup(wl);
5326 ret = __wlcore_roc_completed(wl);
5328 wl1271_ps_elp_sleep(wl);
5330 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler armed by wlcore_op_remain_on_channel(): end the
 * ROC and notify mac80211 that the remain-on-channel period expired.
 */
5335 static void wlcore_roc_complete_work(struct work_struct *work)
5337 struct delayed_work *dwork;
5341 dwork = container_of(work, struct delayed_work, work);
5342 wl = container_of(dwork, struct wl1271, roc_complete_work);
5344 ret = wlcore_roc_completed(wl);
5346 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work and complete the ROC synchronously.
 */
5349 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5351 struct wl1271 *wl = hw->priv;
5353 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5356 wl1271_tx_flush(wl);
5359 * we can't just flush_work here, because it might deadlock
5360 * (as we might get called from the same workqueue)
5362 cancel_delayed_work_sync(&wl->roc_complete_work);
5363 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update callback: forward the rate-control update to
 * the chip-specific handler.
 */
5368 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5369 struct ieee80211_vif *vif,
5370 struct ieee80211_sta *sta,
5373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5374 struct wl1271 *wl = hw->priv;
5376 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 .sta_statistics callback: query the FW for the average RSSI
 * and report it as the station's signal level; leaves sinfo untouched
 * if the chip is off or the query fails.
 */
5379 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5380 struct ieee80211_vif *vif,
5381 struct ieee80211_sta *sta,
5382 struct station_info *sinfo)
5384 struct wl1271 *wl = hw->priv;
5385 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5389 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5391 mutex_lock(&wl->mutex);
5393 if (unlikely(wl->state != WLCORE_STATE_ON))
5396 ret = wl1271_ps_elp_wakeup(wl);
5400 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5404 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5405 sinfo->signal = rssi_dbm;
5408 wl1271_ps_elp_sleep(wl);
5411 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx_frames_pending callback: true when frames are queued in
 * the driver's TX queues or still held by the FW.
 */
5414 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5416 struct wl1271 *wl = hw->priv;
5419 mutex_lock(&wl->mutex);
5421 if (unlikely(wl->state != WLCORE_STATE_ON))
5424 /* packets are considered pending if in the TX queue or the FW */
5425 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5427 mutex_unlock(&wl->mutex);
5432 /* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table: CCK (1-11 Mbps, short preamble where
 * allowed) followed by OFDM (6-54 Mbps), mapped to the FW rate bits. */
5433 static struct ieee80211_rate wl1271_rates[] = {
5435 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5436 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5438 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5439 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5440 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5442 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5443 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5444 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5446 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5447 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5448 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5450 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5451 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5453 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5454 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5456 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5457 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5459 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5460 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5462 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5463 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5465 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5466 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5468 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5469 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5471 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5472 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5475 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list: channels 1-14, all capped at WLCORE_MAX_TXPWR */
5476 static struct ieee80211_channel wl1271_channels[] = {
5477 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5478 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5479 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5480 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5481 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5482 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5483 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5484 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5485 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5486 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5487 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5488 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5489 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5490 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5493 /* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor registered with mac80211 */
5494 static struct ieee80211_supported_band wl1271_band_2ghz = {
5495 .channels = wl1271_channels,
5496 .n_channels = ARRAY_SIZE(wl1271_channels),
5497 .bitrates = wl1271_rates,
5498 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5501 /* 5 GHz data rates for WL1273 */
/* OFDM-only rate table (6-54 Mbps); no CCK on 5 GHz */
5502 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5504 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5505 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5507 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5508 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5510 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5511 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5513 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5514 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5516 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5517 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5519 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5520 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5522 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5523 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5525 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5526 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5529 /* 5 GHz band channels for WL1273 */
/* 5 GHz channel list, all capped at WLCORE_MAX_TXPWR; regulatory flags
 * are applied later by the reg-notifier. */
5530 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5531 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5532 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5533 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5534 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5535 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5536 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5537 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5538 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5539 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5540 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5541 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5542 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5543 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5544 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5545 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5546 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5547 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5548 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5549 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5550 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5551 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5552 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5553 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5554 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5555 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5556 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5557 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5558 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5559 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5560 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5561 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor registered with mac80211 */
5564 static struct ieee80211_supported_band wl1271_band_5ghz = {
5565 .channels = wl1271_channels_5ghz,
5566 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5567 .bitrates = wl1271_rates_5ghz,
5568 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table for all wlcore-based chips */
5571 static const struct ieee80211_ops wl1271_ops = {
5572 .start = wl1271_op_start,
5573 .stop = wlcore_op_stop,
5574 .add_interface = wl1271_op_add_interface,
5575 .remove_interface = wl1271_op_remove_interface,
5576 .change_interface = wl12xx_op_change_interface,
5578 .suspend = wl1271_op_suspend,
5579 .resume = wl1271_op_resume,
5581 .config = wl1271_op_config,
5582 .prepare_multicast = wl1271_op_prepare_multicast,
5583 .configure_filter = wl1271_op_configure_filter,
5585 .set_key = wlcore_op_set_key,
5586 .hw_scan = wl1271_op_hw_scan,
5587 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5588 .sched_scan_start = wl1271_op_sched_scan_start,
5589 .sched_scan_stop = wl1271_op_sched_scan_stop,
5590 .bss_info_changed = wl1271_op_bss_info_changed,
5591 .set_frag_threshold = wl1271_op_set_frag_threshold,
5592 .set_rts_threshold = wl1271_op_set_rts_threshold,
5593 .conf_tx = wl1271_op_conf_tx,
5594 .get_tsf = wl1271_op_get_tsf,
5595 .get_survey = wl1271_op_get_survey,
5596 .sta_state = wl12xx_op_sta_state,
5597 .ampdu_action = wl1271_op_ampdu_action,
5598 .tx_frames_pending = wl1271_tx_frames_pending,
5599 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5600 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5601 .channel_switch = wl12xx_op_channel_switch,
5602 .flush = wlcore_op_flush,
5603 .remain_on_channel = wlcore_op_remain_on_channel,
5604 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5605 .add_chanctx = wlcore_op_add_chanctx,
5606 .remove_chanctx = wlcore_op_remove_chanctx,
5607 .change_chanctx = wlcore_op_change_chanctx,
5608 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5609 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5610 .sta_rc_update = wlcore_op_sta_rc_update,
5611 .sta_statistics = wlcore_op_sta_statistics,
5612 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/* Map a raw HW rate value (as reported by the firmware on RX) to the
 * mac80211 rate index for the given band, via the per-chip
 * band_rate_to_idx[] lookup table.
 *
 * NOTE(review): the error-return statements and the final return are
 * elided from this view; presumably an out-of-range or unsupported
 * rate returns a sentinel index -- confirm against the full source.
 */
5616 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
/* reject rates beyond the chip's rate-table size */
5622 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5623 wl1271_error("Illegal RX rate from HW: %d", rate);
5627 idx = wl->band_rate_to_idx[band][rate];
5628 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5629 wl1271_error("Unsupported RX rate from HW: %d", rate);
/* Populate wl->addresses[] from a base OUI/NIC pair and publish the
 * resulting address list to mac80211 (wiphy->addresses/n_addresses).
 * Each address is OUI (3 bytes, big-endian) followed by NIC (3 bytes,
 * big-endian); consecutive addresses presumably use consecutive NIC
 * values (the increment is elided from this view -- confirm).
 */
5636 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5640 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* warn if the derived range would overflow the 24-bit NIC space */
5643 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5644 wl1271_warning("NIC part of the MAC address wraps around!");
5646 for (i = 0; i < wl->num_mac_addr; i++) {
5647 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5648 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5649 wl->addresses[i].addr[2] = (u8) oui;
5650 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5651 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5652 wl->addresses[i].addr[5] = (u8) nic;
5656 /* we may be one address short at the most */
5657 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5660 * turn on the LAA bit in the first address and use it as
5663 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5664 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5665 memcpy(&wl->addresses[idx], &wl->addresses[0],
5666 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet = locally-administered address flag */
5668 wl->addresses[idx].addr[0] |= BIT(1);
5671 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5672 wl->hw->wiphy->addresses = wl->addresses;
/* Power the chip on just long enough to read its identity: chip ID,
 * PG (production) version, and -- via the optional per-chip get_mac
 * op -- the fuse-programmed MAC address.  Powers the chip back off
 * before returning.  Error-check/goto lines are elided in this view.
 */
5675 static int wl12xx_get_hw_info(struct wl1271 *wl)
5679 ret = wl12xx_set_power_on(wl);
5683 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* default to "no fuse address"; get_mac (if provided) overwrites these */
5687 wl->fuse_oui_addr = 0;
5688 wl->fuse_nic_addr = 0;
5690 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5694 if (wl->ops->get_mac)
5695 ret = wl->ops->get_mac(wl);
5698 wl1271_power_off(wl);
/* Register the device with mac80211 (idempotent: returns early if
 * already registered).  The MAC address is taken from the NVS file
 * when present and non-zero, otherwise derived from the fuse address
 * read earlier by wl12xx_get_hw_info().
 */
5702 static int wl1271_register_hw(struct wl1271 *wl)
5705 u32 oui_addr = 0, nic_addr = 0;
5707 if (wl->mac80211_registered)
5710 if (wl->nvs_len >= 12) {
5711 /* NOTE: The wl->nvs->nvs element must be first, in
5712 * order to simplify the casting, we assume it is at
5713 * the beginning of the wl->nvs structure.
5715 u8 *nvs_ptr = (u8 *)wl->nvs;
/* NVS stores the address split across non-contiguous bytes:
 * OUI = bytes 11,10,6 and NIC = bytes 5,4,3 (big-endian order) */
5718 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5720 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5723 /* if the MAC address is zeroed in the NVS derive from fuse */
5724 if (oui_addr == 0 && nic_addr == 0) {
5725 oui_addr = wl->fuse_oui_addr;
5726 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5727 nic_addr = wl->fuse_nic_addr + 1;
5730 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5732 ret = ieee80211_register_hw(wl->hw);
5734 wl1271_error("unable to register mac80211 hw: %d", ret);
5738 wl->mac80211_registered = true;
5740 wl1271_debugfs_init(wl);
5742 wl1271_notice("loaded");
/* Tear down the mac80211 registration; stops PLT mode first (the
 * "if (wl->plt)" guard around wl1271_plt_stop() is elided here).
 */
5748 static void wl1271_unregister_hw(struct wl1271 *wl)
5751 wl1271_plt_stop(wl);
5753 ieee80211_unregister_hw(wl->hw);
5754 wl->mac80211_registered = false;
/* Fill in all static ieee80211_hw / wiphy capabilities before
 * registration: headroom, HW flags, cipher suites, interface modes,
 * scan limits, band tables (per-device copies), queue layout,
 * probe-response offload and vendor commands.
 */
5758 static int wl1271_init_ieee80211(struct wl1271 *wl)
5761 static const u32 cipher_suites[] = {
5762 WLAN_CIPHER_SUITE_WEP40,
5763 WLAN_CIPHER_SUITE_WEP104,
5764 WLAN_CIPHER_SUITE_TKIP,
5765 WLAN_CIPHER_SUITE_CCMP,
/* TI-proprietary GEM cipher */
5766 WL1271_CIPHER_SUITE_GEM,
5769 /* The tx descriptor buffer */
5770 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra room for the TKIP header workaround */
5772 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5773 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5776 /* FIXME: find a proper value */
5777 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5779 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5780 IEEE80211_HW_SUPPORTS_PS |
5781 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5782 IEEE80211_HW_HAS_RATE_CONTROL |
5783 IEEE80211_HW_CONNECTION_MONITOR |
5784 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5785 IEEE80211_HW_SPECTRUM_MGMT |
5786 IEEE80211_HW_AP_LINK_PS |
5787 IEEE80211_HW_AMPDU_AGGREGATION |
5788 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5789 IEEE80211_HW_QUEUE_CONTROL |
5790 IEEE80211_HW_CHANCTX_STA_CSA;
5792 wl->hw->wiphy->cipher_suites = cipher_suites;
5793 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5795 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5796 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5797 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5798 wl->hw->wiphy->max_scan_ssids = 1;
5799 wl->hw->wiphy->max_sched_scan_ssids = 16;
5800 wl->hw->wiphy->max_match_sets = 16;
5802 * Maximum length of elements in scanning probe request templates
5803 * should be the maximum length possible for a template, without
5804 * the IEEE80211 header of the template
5806 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5807 sizeof(struct ieee80211_header);
5809 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5810 sizeof(struct ieee80211_header);
/* 30 seconds max remain-on-channel (value in ms) */
5812 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
5814 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5815 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5816 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5818 /* make sure all our channels fit in the scanned_ch bitmask */
5819 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5820 ARRAY_SIZE(wl1271_channels_5ghz) >
5821 WL1271_MAX_CHANNELS);
5823 * clear channel flags from the previous usage
5824 * and restore max_power & max_antenna_gain values.
5826 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5827 wl1271_band_2ghz.channels[i].flags = 0;
5828 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5829 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5832 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5833 wl1271_band_5ghz.channels[i].flags = 0;
5834 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5835 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5839 * We keep local copies of the band structs because we need to
5840 * modify them on a per-device basis.
5842 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5843 sizeof(wl1271_band_2ghz));
/* splice in the chip-specific HT capabilities read at init time */
5844 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5845 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5846 sizeof(*wl->ht_cap));
5847 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5848 sizeof(wl1271_band_5ghz));
5849 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5850 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5851 sizeof(*wl->ht_cap));
5853 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5854 &wl->bands[IEEE80211_BAND_2GHZ];
5855 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5856 &wl->bands[IEEE80211_BAND_5GHZ];
5859 * allow 4 queues per mac address we support +
5860 * 1 cab queue per mac + one global offchannel Tx queue
5862 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5864 /* the last queue is the offchannel queue */
5865 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5866 wl->hw->max_rates = 1;
5868 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5870 /* the FW answers probe-requests in AP-mode */
5871 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5872 wl->hw->wiphy->probe_resp_offload =
5873 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5874 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5875 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5877 /* allowed interface combinations */
5878 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5879 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5881 /* register vendor commands */
5882 wlcore_set_vendor_commands(wl->hw->wiphy);
5884 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* per-station / per-vif private data sizes for mac80211 allocations */
5886 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5887 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5889 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/* Allocate and initialize a wl1271 device instance together with its
 * ieee80211_hw: work items, queues, locks, the TX aggregation buffer,
 * dummy packet, FW log page, mailbox and scratch buffer.  On failure,
 * unwinds via the (partially elided) goto-cleanup ladder and returns
 * ERR_PTR(ret).  Callers free with wlcore_free_hw().
 *
 * @priv_size: size of the chip-specific private area (wl->priv)
 * @aggr_buf_size: TX aggregation buffer size (page-order allocation)
 */
5894 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5897 struct ieee80211_hw *hw;
5902 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5904 wl1271_error("could not alloc ieee80211_hw");
5910 memset(wl, 0, sizeof(*wl));
5912 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5914 wl1271_error("could not alloc wl priv");
5916 goto err_priv_alloc;
5919 INIT_LIST_HEAD(&wl->wlvif_list);
5924 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5925 * we don't allocate any additional resource here, so that's fine.
5927 for (i = 0; i < NUM_TX_QUEUES; i++)
5928 for (j = 0; j < WLCORE_MAX_LINKS; j++)
5929 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5931 skb_queue_head_init(&wl->deferred_rx_queue);
5932 skb_queue_head_init(&wl->deferred_tx_queue);
5934 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5935 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5936 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5937 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5938 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5939 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5940 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is quiesced across system suspend */
5942 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5943 if (!wl->freezable_wq) {
5950 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5951 wl->band = IEEE80211_BAND_2GHZ;
5952 wl->channel_type = NL80211_CHAN_NO_HT;
5954 wl->sg_enabled = true;
5955 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5956 wl->recovery_count = 0;
5959 wl->ap_fw_ps_map = 0;
5961 wl->platform_quirks = 0;
5962 wl->system_hlid = WL12XX_SYSTEM_HLID;
5963 wl->active_sta_count = 0;
5964 wl->active_link_count = 0;
5966 init_waitqueue_head(&wl->fwlog_waitq);
5968 /* The system link is always allocated */
5969 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5971 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5972 for (i = 0; i < wl->num_tx_desc; i++)
5973 wl->tx_frames[i] = NULL;
5975 spin_lock_init(&wl->wl_lock);
5977 wl->state = WLCORE_STATE_OFF;
5978 wl->fw_type = WL12XX_FW_TYPE_NONE;
5979 mutex_init(&wl->mutex);
5980 mutex_init(&wl->flush_mutex);
5981 init_completion(&wl->nvs_loading_complete);
5983 order = get_order(aggr_buf_size);
5984 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5985 if (!wl->aggr_buf) {
5989 wl->aggr_buf_size = aggr_buf_size;
5991 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5992 if (!wl->dummy_packet) {
5997 /* Allocate one page for the FW log */
5998 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6001 goto err_dummy_packet;
6004 wl->mbox_size = mbox_size;
/* GFP_DMA: the mailbox is a target of device DMA transfers */
6005 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6011 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6012 if (!wl->buffer_32) {
/* error unwinding -- frees in reverse order of allocation */
6023 free_page((unsigned long)wl->fwlog);
6026 dev_kfree_skb(wl->dummy_packet);
6029 free_pages((unsigned long)wl->aggr_buf, order);
6032 destroy_workqueue(wl->freezable_wq);
6035 wl1271_debugfs_exit(wl);
6039 ieee80211_free_hw(hw);
6043 return ERR_PTR(ret);
6045 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/* Release everything allocated by wlcore_alloc_hw().  First unblocks
 * any readers sleeping on the FW log waitqueue (fwlog_size = -1 acts
 * as the "device going away" sentinel), then frees buffers in reverse
 * allocation order and the ieee80211_hw itself.
 */
6047 int wlcore_free_hw(struct wl1271 *wl)
6049 /* Unblock any fwlog readers */
6050 mutex_lock(&wl->mutex);
6051 wl->fwlog_size = -1;
6052 wake_up_interruptible_all(&wl->fwlog_waitq);
6053 mutex_unlock(&wl->mutex);
6055 wlcore_sysfs_free(wl);
6057 kfree(wl->buffer_32);
6059 free_page((unsigned long)wl->fwlog);
6060 dev_kfree_skb(wl->dummy_packet);
6061 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6063 wl1271_debugfs_exit(wl);
6067 wl->fw_type = WL12XX_FW_TYPE_NONE;
6071 kfree(wl->raw_fw_status);
6072 kfree(wl->fw_status);
6073 kfree(wl->tx_res_if);
6074 destroy_workqueue(wl->freezable_wq);
6077 ieee80211_free_hw(wl->hw);
6081 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/* WoWLAN capabilities advertised to cfg80211 when the platform keeps
 * power in suspend (see wlcore_nvs_cb): wake on any trigger, plus
 * RX-filter pattern matching within the sizes below.
 */
6084 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6085 .flags = WIPHY_WOWLAN_ANY,
6086 .n_patterns = WL1271_MAX_RX_FILTERS,
6087 .pattern_min_len = 1,
6088 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/* Top-half IRQ handler used only for edge-triggered interrupt quirk
 * platforms: do nothing in hard-IRQ context, just kick the threaded
 * handler (wlcore_irq).
 */
6092 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6094 return IRQ_WAKE_THREAD;
/* Asynchronous completion callback for the NVS firmware request made
 * in wlcore_probe().  Performs the remainder of device bring-up:
 * copies the NVS blob (missing NVS is tolerated -- only a debug
 * message), runs chip setup, installs the IRQ handler, reads HW info,
 * and registers with mac80211 and sysfs.  Always releases the
 * firmware and completes nvs_loading_complete so wlcore_remove() can
 * proceed.  Several error-check/goto lines are elided in this view.
 */
6097 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6099 struct wl1271 *wl = context;
6100 struct platform_device *pdev = wl->pdev;
6101 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6102 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6103 unsigned long irqflags;
6105 irq_handler_t hardirq_fn = NULL;
6108 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6110 wl1271_error("Could not allocate nvs data");
6113 wl->nvs_len = fw->size;
/* NVS file absent: not fatal, continue without calibration data */
6115 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6121 ret = wl->ops->setup(wl);
6125 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6127 /* adjust some runtime configuration parameters */
6128 wlcore_adjust_conf(wl);
6130 wl->irq = platform_get_irq(pdev, 0);
6131 wl->platform_quirks = pdata->platform_quirks;
6132 wl->if_ops = pdev_data->if_ops;
/* edge-triggered platforms need a non-NULL hardirq to wake the thread */
6134 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6135 irqflags = IRQF_TRIGGER_RISING;
6136 hardirq_fn = wlcore_hardirq;
6138 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6141 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6142 irqflags, pdev->name, wl);
6144 wl1271_error("request_irq() failed: %d", ret);
/* arm the IRQ as a system wakeup source; advertise WoWLAN only if
 * the platform keeps the chip powered in suspend */
6149 ret = enable_irq_wake(wl->irq);
6151 wl->irq_wake_enabled = true;
6152 device_init_wakeup(wl->dev, 1);
6153 if (pdata->pwr_in_suspend)
6154 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* keep the IRQ masked until the interface is started */
6157 disable_irq(wl->irq);
6159 ret = wl12xx_get_hw_info(wl);
6161 wl1271_error("couldn't get hw info");
6165 ret = wl->ops->identify_chip(wl);
6169 ret = wl1271_init_ieee80211(wl);
6173 ret = wl1271_register_hw(wl);
6177 ret = wlcore_sysfs_init(wl);
6181 wl->initialized = true;
/* error unwinding */
6185 wl1271_unregister_hw(wl);
6188 free_irq(wl->irq, wl);
6194 release_firmware(fw);
6195 complete_all(&wl->nvs_loading_complete);
/* Common probe entry point for wlcore-based drivers.  Validates that
 * the chip driver filled in its ops/ptable, then kicks off an async
 * NVS firmware load; the rest of initialization happens in
 * wlcore_nvs_cb().  On request failure, completes
 * nvs_loading_complete so a subsequent remove doesn't block forever.
 */
6198 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6202 if (!wl->ops || !wl->ptable)
6205 wl->dev = &pdev->dev;
6207 platform_set_drvdata(pdev, wl);
6209 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6210 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6213 wl1271_error("request_firmware_nowait failed: %d", ret);
6214 complete_all(&wl->nvs_loading_complete);
6219 EXPORT_SYMBOL_GPL(wlcore_probe);
/* Common remove entry point.  Waits for the async NVS/bring-up path
 * (wlcore_nvs_cb) to finish before tearing anything down; bails out
 * early if bring-up never completed successfully.
 */
6221 int wlcore_remove(struct platform_device *pdev)
6223 struct wl1271 *wl = platform_get_drvdata(pdev);
6225 wait_for_completion(&wl->nvs_loading_complete);
6226 if (!wl->initialized)
6229 if (wl->irq_wake_enabled) {
/* undo the wakeup-source arming done in wlcore_nvs_cb() */
6230 device_init_wakeup(wl->dev, 0);
6231 disable_irq_wake(wl->irq);
6233 wl1271_unregister_hw(wl);
6234 free_irq(wl->irq, wl);
6239 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and metadata.  The backing variables for fwlog,
 * fwlog_mem_blocks, bug_on_recovery and no_recovery are declared at
 * the top of this file (defaults -1 = "use per-chip configuration").
 */
6241 u32 wl12xx_debug_level = DEBUG_NONE;
6242 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6243 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6244 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* perm 0: set only at load time, not exposed via sysfs */
6246 module_param_named(fwlog, fwlog_param, charp, 0);
6247 MODULE_PARM_DESC(fwlog,
6248 "FW logger options: continuous, ondemand, dbgpins or disable");
6250 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6251 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6253 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6254 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6256 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6257 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6259 MODULE_LICENSE("GPL");
6260 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6261 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6262 MODULE_FIRMWARE(WL12XX_NVS_NAME);