3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
/* Maximum number of firmware boot attempts before giving up. */
44 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. A value of -1 (or NULL for fwlog_param)
 * means "not set on the command line; keep the per-chip default".
 */
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
/* Forward declarations for functions referenced before their definitions. */
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the firmware that our STA interface completed association by
 * updating the peer state of our own station link. The STATE_SENT flag
 * guarantees the command is issued at most once per association.
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* Peer-state updates only make sense on station interfaces. */
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
/* Nothing to do before mac80211 reports the association. */
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* Already sent for this association - don't repeat it. */
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: mark 5GHz radar channels as no-IR
 * (no initiating radiation) and push the updated regulatory domain
 * configuration to the firmware.
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
/* Only the 5GHz band is walked here; radar/DFS applies there. */
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
/* Skip channels disabled by the regulatory core. */
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
/* Radar detection required: forbid initiating radiation. */
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
/* Propagate the new channel configuration to the FW. */
98 wlcore_regdomain_config(wl);
/*
 * Enable or disable FW rx streaming for this vif and mirror the new
 * state in the vif flags so wl1271_recalc_rx_streaming() can test it.
 * Caller must hold wl->mutex.
 */
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* Track the FW-side state in the vif flags. */
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
120 * this function is being called when the rx_streaming interval
121 * has been changed or rx_streaming should be disabled
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
126 int period = wl->conf.rx_streaming.interval;
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
132 /* reconfigure/disable according to new streaming_period */
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
/* Conditions for streaming no longer hold: turn it off. */
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable rx streaming for an associated STA vif, then arm
 * the inactivity timer that later queues the disable work.
 */
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
150 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 rx_streaming_enable_work);
152 struct wl1271 *wl = wlvif->wl;
154 mutex_lock(&wl->mutex);
/* Bail out if already started, not associated, or not wanted now. */
156 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 (!wl->conf.rx_streaming.always &&
159 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
/* An interval of 0 means rx streaming is disabled in conf. */
162 if (!wl->conf.rx_streaming.interval)
/* Wake the chip from ELP before issuing commands. */
165 ret = wl1271_ps_elp_wakeup(wl);
169 ret = wl1271_set_rx_streaming(wl, wlvif, true);
173 /* stop it after some time of inactivity */
174 mod_timer(&wlvif->rx_streaming_timer,
175 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
178 wl1271_ps_elp_sleep(wl);
180 mutex_unlock(&wl->mutex);
/*
 * Work item: disable rx streaming for a vif, queued from the
 * inactivity timer. Counterpart of the enable work above.
 */
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
190 mutex_lock(&wl->mutex);
/* Nothing to do if streaming is not currently active. */
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/* Wake the chip from ELP before issuing commands. */
195 ret = wl1271_ps_elp_wakeup(wl);
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
204 wl1271_ps_elp_sleep(wl);
206 mutex_unlock(&wl->mutex);
/*
 * Rx-streaming inactivity timer callback: cannot sleep here, so defer
 * the actual disabling to the disable work item.
 */
209 static void wl1271_rx_streaming_timer(unsigned long data)
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
/*
 * Re-arm the Tx watchdog: restart the delayed work with a fresh
 * timeout, but only while blocks are allocated in the FW (i.e. Tx is
 * actually in flight).
 */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
/* Restart the countdown from now. */
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires when no Tx completion was seen for the configured
 * timeout. Legitimate stalls (ROC, scan, AP buffering for PS stations)
 * just re-arm the watchdog; otherwise we assume the FW is stuck and
 * trigger a full recovery.
 */
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 struct delayed_work *dwork;
233 dwork = container_of(work, struct delayed_work, work);
234 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 mutex_lock(&wl->mutex);
/* The chip may have gone down since the work was queued. */
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 /* Tx went out in the meantime - everything is ok */
242 if (unlikely(wl->tx_allocated_blocks == 0))
246 * if a ROC is in progress, we might not have any Tx for a long
247 * time (e.g. pending Tx on the non-ROC channels)
249 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 wl->conf.tx.tx_watchdog_timeout);
252 wl12xx_rearm_tx_watchdog_locked(wl);
257 * if a scan is in progress, we might not have any Tx for a long
260 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 wl->conf.tx.tx_watchdog_timeout);
263 wl12xx_rearm_tx_watchdog_locked(wl);
268 * AP might cache a frame for a long time for a sleeping station,
269 * so rearm the timer if there's an AP interface with stations. If
270 * Tx is genuinely stuck we will most hopefully discover it when all
271 * stations are removed due to inactivity.
273 if (wl->active_sta_count) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
276 wl->conf.tx.tx_watchdog_timeout,
277 wl->active_sta_count);
278 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation found - the FW Tx path is stuck. */
282 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_queue_recovery_work(wl);
287 mutex_unlock(&wl->mutex);
/*
 * Apply the optional module parameters (fwlog settings, recovery
 * behavior) on top of the default configuration in wl->conf.
 */
290 static void wlcore_adjust_conf(struct wl1271 *wl)
292 /* Adjust settings according to optional module parameters */
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
/* Only accept values within the FW-supported range. */
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* Map the fwlog_param string onto a logger mode/output. */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/* Recovery behavior overrides (-1 keeps the conf default). */
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Decide, per AP link, whether to start or stop host-side high-level
 * power save based on the FW's per-link PS bitmap and the number of
 * packets the link currently holds in FW memory.
 */
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 struct wl12xx_vif *wlvif,
/* FW-reported PS state for this link. */
335 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 * Wake up from high level PS if the STA is asleep with too little
339 * packets in FW or if the STA is awake.
341 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 wl12xx_ps_link_end(wl, wlvif, hlid);
345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 2*ap_count + 1 active links,
349 * since we must account for the global and broadcast AP links
350 * for each AP. The "fw_ps" check assures us the other link is a STA
351 * connected to the AP. Otherwise the FW would not set the PSM bit.
353 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW PS bitmap from the FW status and
 * re-evaluate PS regulation for every station connected to this AP vif.
 */
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status *status)
365 cur_fw_ps_map = status->link_ps_bitmap;
/* Log only when the PS bitmap actually changed. */
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 wl->ap_fw_ps_map = cur_fw_ps_map;
/* Regulate PS per connected station, using its allocated packets. */
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status area: update per-queue and per-link
 * freed-packet counters, the global freed-blocks counter, available Tx
 * blocks, AP link PS status and the host/chip time offset.
 * Counter deltas are masked to 8 bits since the FW counters wrap at 256.
 */
380 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382 struct wl12xx_vif *wlvif;
384 u32 old_tx_blk_count = wl->tx_blocks_available;
385 int avail, freed_blocks;
388 struct wl1271_link *lnk;
/* Fetch the raw status block from the chip. */
390 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
392 wl->fw_status_len, false);
/* Chip-specific conversion from raw to parsed status. */
396 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
398 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status->fw_rx_counter,
402 status->drv_rx_counter,
403 status->tx_results_counter);
405 for (i = 0; i < NUM_TX_QUEUES; i++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl->tx_allocated_pkts[i] -=
408 (status->counters.tx_released_pkts[i] -
409 wl->tx_pkts_freed[i]) & 0xff;
411 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* Same accounting, per link this time. */
415 for_each_set_bit(i, wl->links_map, wl->num_links) {
419 /* prevent wrap-around in freed-packets counter */
420 diff = (status->counters.tx_lnk_free_pkts[i] -
421 lnk->prev_freed_pkts) & 0xff;
426 lnk->allocated_pkts -= diff;
427 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
429 /* accumulate the prev_freed_pkts counter */
430 lnk->total_freed_pkts += diff;
433 /* prevent wrap-around in total blocks counter */
434 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
435 freed_blocks = status->total_released_blks -
/* 32-bit counter wrapped: account for the wrap explicitly. */
438 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
439 status->total_released_blks;
441 wl->tx_blocks_freed = status->total_released_blks;
443 wl->tx_allocated_blocks -= freed_blocks;
446 * If the FW freed some blocks:
447 * If we still have allocated blocks - re-arm the timer, Tx is
448 * not stuck. Otherwise, cancel the timer (no Tx currently).
451 if (wl->tx_allocated_blocks)
452 wl12xx_rearm_tx_watchdog_locked(wl);
454 cancel_delayed_work(&wl->tx_watchdog_work);
457 avail = status->tx_total - wl->tx_allocated_blocks;
460 * The FW might change the total number of TX memblocks before
461 * we get a notification about blocks being released. Thus, the
462 * available blocks calculation might yield a temporary result
463 * which is lower than the actual available blocks. Keeping in
464 * mind that only blocks that were allocated can be moved from
465 * TX to RX, tx_blocks_available should never decrease here.
467 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
470 /* if more blocks are available now, tx work can be scheduled */
471 if (wl->tx_blocks_available > old_tx_blk_count)
472 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
474 /* for AP update num of allocated TX blocks per link and ps status */
475 wl12xx_for_each_wlvif_ap(wl, wlvif) {
476 wl12xx_irq_update_links_status(wl, wlvif, status);
479 /* update the host-chipset time offset */
481 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
482 (s64)(status->fw_localtime);
/* Cache the FW's fast-link bitmap for the Tx path. */
484 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred rx/tx skb queues into the network stack: deliver
 * received frames and report completed transmissions.
 */
489 static void wl1271_flush_deferred_work(struct wl1271 *wl)
493 /* Pass all received frames to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
495 ieee80211_rx_ni(wl->hw, skb);
497 /* Return sent skbs to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
499 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, repeating until the rx
 * queue stays empty (new frames may be queued while we flush).
 */
502 static void wl1271_netstack_work(struct work_struct *work)
505 container_of(work, struct wl1271, netstack_work);
508 wl1271_flush_deferred_work(wl);
509 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on interrupt-service iterations per invocation. */
512 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt service routine, called with wl->mutex held: read the
 * FW status, then dispatch rx, tx completion and event interrupts,
 * looping while more interrupts are pending (up to MAX_LOOPS).
 */
514 static int wlcore_irq_locked(struct wl1271 *wl)
518 int loopcount = WL1271_IRQ_MAX_LOOPS;
520 unsigned int defer_count;
524 * In case edge triggered interrupt must be used, we cannot iterate
525 * more than once without introducing race conditions with the hardirq.
527 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
530 wl1271_debug(DEBUG_IRQ, "IRQ work");
532 if (unlikely(wl->state != WLCORE_STATE_ON))
/* Wake the chip from ELP before touching registers. */
535 ret = wl1271_ps_elp_wakeup(wl);
539 while (!done && loopcount--) {
541 * In order to avoid a race with the hardirq, clear the flag
542 * before acknowledging the chip. Since the mutex is held,
543 * wl1271_ps_elp_wakeup cannot be called concurrently.
545 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
546 smp_mb__after_clear_bit();
548 ret = wlcore_fw_status(wl, wl->fw_status);
/* Handle immediate (in-status) Tx completions first. */
552 wlcore_hw_tx_immediate_compl(wl);
554 intr = wl->fw_status->intr;
555 intr &= WLCORE_ALL_INTR_MASK;
/* HW watchdog fired: start recovery, skip all other interrupts. */
561 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
562 wl1271_error("HW watchdog interrupt received! starting recovery.");
563 wl->watchdog_recovery = true;
566 /* restarting the chip. ignore any other interrupt. */
/* SW watchdog fired: same recovery path as the HW watchdog. */
570 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
571 wl1271_error("SW watchdog interrupt received! "
572 "starting recovery.");
573 wl->watchdog_recovery = true;
576 /* restarting the chip. ignore any other interrupt. */
580 if (likely(intr & WL1271_ACX_INTR_DATA)) {
581 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 ret = wlcore_rx(wl, wl->fw_status);
587 /* Check if any tx blocks were freed */
588 spin_lock_irqsave(&wl->wl_lock, flags);
589 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
590 wl1271_tx_total_queue_count(wl) > 0) {
591 spin_unlock_irqrestore(&wl->wl_lock, flags);
593 * In order to avoid starvation of the TX path,
594 * call the work function directly.
596 ret = wlcore_tx_work_locked(wl);
600 spin_unlock_irqrestore(&wl->wl_lock, flags);
603 /* check for tx results */
604 ret = wlcore_hw_tx_delayed_compl(wl);
608 /* Make sure the deferred queues don't get too long */
609 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
610 skb_queue_len(&wl->deferred_rx_queue);
611 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
612 wl1271_flush_deferred_work(wl);
/* Mailbox events from the FW (two event mailboxes: A and B). */
615 if (intr & WL1271_ACX_INTR_EVENT_A) {
616 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
617 ret = wl1271_event_handle(wl, 0);
622 if (intr & WL1271_ACX_INTR_EVENT_B) {
623 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
624 ret = wl1271_event_handle(wl, 1);
/* Informational interrupts: log only, no action needed. */
629 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
630 wl1271_debug(DEBUG_IRQ,
631 "WL1271_ACX_INTR_INIT_COMPLETE");
633 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
634 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
/* Allow the chip to go back to ELP. */
637 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: completes a pending ELP wakeup, defers the
 * work while suspended, otherwise services the interrupt under
 * wl->mutex and queues tx work or recovery as needed.
 */
643 static irqreturn_t wlcore_irq(int irq, void *cookie)
647 struct wl1271 *wl = cookie;
649 /* complete the ELP completion */
650 spin_lock_irqsave(&wl->wl_lock, flags)
651 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
/* Someone is waiting for the chip to wake up - signal them. */
653 complete(wl->elp_compl);
654 wl->elp_compl = NULL;
657 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
658 /* don't enqueue a work right now. mark it as pending */
659 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
660 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
661 disable_irq_nosync(wl->irq);
662 pm_wakeup_event(wl->dev, 0);
663 spin_unlock_irqrestore(&wl->wl_lock, flags);
666 spin_unlock_irqrestore(&wl->wl_lock, flags);
668 /* TX might be handled here, avoid redundant work */
669 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
670 cancel_work_sync(&wl->tx_work);
672 mutex_lock(&wl->mutex);
674 ret = wlcore_irq_locked(wl);
/* Any error while servicing the IRQ triggers recovery. */
676 wl12xx_queue_recovery_work(wl);
678 spin_lock_irqsave(&wl->wl_lock, flags);
679 /* In case TX was not handled here, queue TX work */
680 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
681 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
682 wl1271_tx_total_queue_count(wl) > 0)
683 ieee80211_queue_work(wl->hw, &wl->tx_work);
684 spin_unlock_irqrestore(&wl->wl_lock, flags);
686 mutex_unlock(&wl->mutex);
686 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces via mac80211 iteration. */
691 struct vif_counter_data {
/* The vif the caller asked about; cur_vif_running records whether
 * it was seen among the active interfaces. */
694 struct ieee80211_vif *cur_vif;
695 bool cur_vif_running;
/* Per-interface iterator callback for wl12xx_get_vif_count(). */
698 static void wl12xx_vif_count_iter(void *data, u8 *mac,
699 struct ieee80211_vif *vif)
701 struct vif_counter_data *counter = data;
704 if (counter->cur_vif == vif)
705 counter->cur_vif_running = true;
708 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count active interfaces and check whether cur_vif is among them,
 * filling *data. Uses mac80211's active-interface iterator.
 */
709 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
710 struct ieee80211_vif *cur_vif,
711 struct vif_counter_data *data)
713 memset(data, 0, sizeof(*data));
714 data->cur_vif = cur_vif;
716 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
717 wl12xx_vif_count_iter, data);
/*
 * Select the right firmware image (PLT, multi-role or single-role),
 * fetch it via request_firmware() and copy it into wl->fw. Skips the
 * fetch entirely if the requested type is already loaded.
 */
720 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
722 const struct firmware *fw;
724 enum wl12xx_fw_type fw_type;
728 fw_type = WL12XX_FW_TYPE_PLT;
729 fw_name = wl->plt_fw_name;
732 * we can't call wl12xx_get_vif_count() here because
733 * wl->mutex is taken, so use the cached last_vif_count value
735 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
736 fw_type = WL12XX_FW_TYPE_MULTI;
737 fw_name = wl->mr_fw_name;
/* Single role: use the single-role firmware. */
739 fw_type = WL12XX_FW_TYPE_NORMAL;
740 fw_name = wl->sr_fw_name;
/* The right firmware is already loaded - nothing to do. */
744 if (wl->fw_type == fw_type)
747 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
749 ret = request_firmware(&fw, fw_name, wl->dev);
752 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* The boot code uploads the image in 32-bit words. */
757 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate the cached type until the copy fully succeeds. */
764 wl->fw_type = WL12XX_FW_TYPE_NONE;
765 wl->fw_len = fw->size;
766 wl->fw = vmalloc(wl->fw_len);
769 wl1271_error("could not allocate memory for the firmware");
774 memcpy(wl->fw, fw->data, wl->fw_len);
776 wl->fw_type = fw_type;
/* The kernel's firmware object is no longer needed. */
778 release_firmware(fw);
/*
 * Kick off firmware recovery: mark the chip as restarting, wake it,
 * mask further interrupts and queue the recovery work. A no-op when
 * recovery is already in progress (state != ON).
 */
783 void wl12xx_queue_recovery_work(struct wl1271 *wl)
785 /* Avoid a recursive recovery */
786 if (wl->state == WLCORE_STATE_ON) {
/* Unexpected (non-intended) recoveries warn loudly. */
787 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
790 wl->state = WLCORE_STATE_RESTARTING;
791 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
792 wl1271_ps_elp_wakeup(wl);
793 wlcore_disable_interrupts_nosync(wl);
794 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to the host-side log buffer
 * (exposed through sysfs), clamped so wl->fwlog never exceeds PAGE_SIZE.
 */
798 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
802 /* Make sure we have enough room */
803 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
805 /* Fill the FW log file, consumed by the sysfs fwlog entry */
806 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
807 wl->fwlog_size += len;
/*
 * On a FW panic, walk the firmware's linked list of log memory blocks
 * in chip memory and copy their contents into the host log buffer,
 * temporarily re-mapping the partition window for each block.
 */
812 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
814 struct wlcore_partition_set part, old_part;
/* Nothing to read when the logger is not implemented or disabled. */
821 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
822 (wl->conf.fwlog.mem_blocks == 0))
825 wl1271_info("Reading FW panic log");
/* Scratch buffer for one FW memory block at a time. */
827 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
832 * Make sure the chip is awake and the logger isn't active.
833 * Do not send a stop fwlog command if the fw is hanged or if
834 * dbgpins are used (due to some fw bug).
836 if (wl1271_ps_elp_wakeup(wl))
838 if (!wl->watchdog_recovery &&
839 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 wl12xx_cmd_stop_fwlog(wl);
842 /* Read the first memory block address */
843 ret = wlcore_fw_status(wl, wl->fw_status);
847 addr = wl->fw_status->log_start_addr;
/* Continuous mode prepends an rx descriptor to each block; skip it. */
851 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
852 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
853 end_of_log = wl->fwlog_end;
855 offset = sizeof(addr);
/* Remember the current partition so we can restore it at the end. */
859 old_part = wl->curr_part;
860 memset(&part, 0, sizeof(part));
862 /* Traverse the memory blocks linked list */
864 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
865 part.mem.size = PAGE_SIZE;
867 ret = wlcore_set_partition(wl, &part);
869 wl1271_error("%s: set_partition start=0x%X size=%d",
870 __func__, part.mem.start, part.mem.size);
874 memset(block, 0, wl->fw_mem_block_size);
875 ret = wlcore_read_hwaddr(wl, addr, block,
876 wl->fw_mem_block_size, false);
882 * Memory blocks are linked to one another. The first 4 bytes
883 * of each memory block hold the hardware address of the next
884 * one. The last memory block points to the first one in
885 * on-demand mode and is equal to 0x2000000 in continuous mode.
887 addr = le32_to_cpup((__le32 *)block);
/* Stop if the host-side log buffer is full. */
889 if (!wl12xx_copy_fwlog(wl, block + offset,
890 wl->fw_mem_block_size - offset))
892 } while (addr && (addr != end_of_log));
/* Wake up any reader blocked on the fwlog sysfs entry. */
894 wake_up_interruptible(&wl->fwlog_waitq);
898 wlcore_set_partition(wl, &old_part);
/*
 * Log diagnostic information for a recovery: FW version, the FW
 * program counter at the time of the crash and the raw interrupt
 * status, using the BOOT partition to reach the registers.
 */
901 static void wlcore_print_recovery(struct wl1271 *wl)
907 wl1271_info("Hardware recovery in progress. FW ver: %s",
908 wl->chip.fw_ver_str);
910 /* change partitions momentarily so we can read the FW pc */
911 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
915 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
919 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
923 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
924 pc, hint_sts, ++wl->recovery_count);
/* Restore the normal working partition. */
926 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work: dump the FW panic log and crash diagnostics, tear
 * down all interfaces, stop the chip and ask mac80211 to restart the
 * hardware. Honors the no_recovery/bug_on_recovery conf overrides.
 */
930 static void wl1271_recovery_work(struct work_struct *work)
933 container_of(work, struct wl1271, recovery_work);
934 struct wl12xx_vif *wlvif;
935 struct ieee80211_vif *vif;
937 mutex_lock(&wl->mutex);
/* Nothing to recover if the chip is off or in PLT mode. */
939 if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* Only dump diagnostics for unexpected (crash) recoveries. */
942 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
943 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
944 wl12xx_read_fwlog_panic(wl);
945 wlcore_print_recovery(wl);
/* Debug aid: panic the kernel on unexpected recovery if requested. */
948 BUG_ON(wl->conf.recovery.bug_on_recovery &&
949 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
951 if (wl->conf.recovery.no_recovery) {
952 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
956 /* Prevent spurious TX during FW restart */
957 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
959 /* reboot the chipset */
960 while (!list_empty(&wl->wlvif_list)) {
961 wlvif = list_first_entry(&wl->wlvif_list,
962 struct wl12xx_vif, list);
963 vif = wl12xx_wlvif_to_vif(wlvif);
964 __wl1271_op_remove_interface(wl, vif, false);
967 wlcore_op_stop_locked(wl);
/* Ask mac80211 to reconfigure everything once we're back up. */
969 ieee80211_restart_hw(wl->hw);
972 * It's safe to enable TX now - the queues are stopped after a request
975 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
978 wl->watchdog_recovery = false;
979 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
980 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing the wake-up bit. */
983 static int wlcore_fw_wakeup(struct wl1271 *wl)
985 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and tx-result buffers, freeing
 * whatever was already allocated on the failure path.
 */
988 static int wl1271_setup(struct wl1271 *wl)
990 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
991 if (!wl->raw_fw_status)
994 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
998 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* Error path: release the buffers allocated above. */
1004 kfree(wl->fw_status);
1005 kfree(wl->raw_fw_status);
/*
 * Power the chip on: apply the power-on delays, reset the bus
 * interface, map the BOOT partition and wake the chip from ELP.
 * Powers the chip back off on failure.
 */
1009 static int wl12xx_set_power_on(struct wl1271 *wl)
1013 msleep(WL1271_PRE_POWER_ON_SLEEP);
1014 ret = wl1271_power_on(wl);
1017 msleep(WL1271_POWER_ON_SLEEP);
1018 wl1271_io_reset(wl);
1021 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1025 /* ELP module wake up */
1026 ret = wlcore_fw_wakeup(wl);
/* Error path: undo the power-on. */
1034 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to boot firmware: power on, configure
 * the bus block size, allocate buffers and fetch the right FW image.
 */
1038 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1042 ret = wl12xx_set_power_on(wl);
1047 * For wl127x based devices we could use the default block
1048 * size (512 bytes), but due to a bug in the sdio driver, we
1049 * need to set it explicitly after the chip is powered on. To
1050 * simplify the code and since the performance impact is
1051 * negligible, we use the same block size for all different
1054 * Check if the bus supports blocksize alignment and, if it
1055 * doesn't, make sure we don't have the quirk.
1057 if (!wl1271_set_block_size(wl))
1058 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1060 /* TODO: make sure the lower driver has set things up correctly */
1062 ret = wl1271_setup(wl);
1066 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line testing) mode: boot the chip with the
 * PLT firmware, retrying up to WL1271_BOOT_RETRIES times, and record
 * the hw/fw version info in the wiphy. Fails if the chip is not OFF.
 */
1074 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1076 int retries = WL1271_BOOT_RETRIES;
1077 struct wiphy *wiphy = wl->hw->wiphy;
/* Human-readable names for the PLT modes, used in the boot notice. */
1079 static const char* const PLT_MODE[] = {
1088 mutex_lock(&wl->mutex);
1090 wl1271_notice("power up");
/* PLT can only be entered from the OFF state. */
1092 if (wl->state != WLCORE_STATE_OFF) {
1093 wl1271_error("cannot go into PLT state because not "
1094 "in off state: %d", wl->state);
1099 /* Indicate to lower levels that we are now in PLT mode */
1101 wl->plt_mode = plt_mode;
1105 ret = wl12xx_chip_wakeup(wl, true);
/* PLT_CHIP_AWAKE skips FW-level PLT initialization. */
1109 if (plt_mode != PLT_CHIP_AWAKE) {
1110 ret = wl->ops->plt_init(wl);
1115 wl->state = WLCORE_STATE_ON;
1116 wl1271_notice("firmware booted in PLT mode %s (%s)",
1118 wl->chip.fw_ver_str);
1120 /* update hw/fw version info in wiphy struct */
1121 wiphy->hw_version = wl->chip.id;
1122 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1123 sizeof(wiphy->fw_version));
/* Boot attempt failed: power off and retry, then give up. */
1128 wl1271_power_off(wl);
1132 wl->plt_mode = PLT_OFF;
1134 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1135 WL1271_BOOT_RETRIES);
1137 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts, flush all pending work and
 * power the chip off, returning the driver to the OFF state.
 */
1142 int wl1271_plt_stop(struct wl1271 *wl)
1146 wl1271_notice("power down");
1149 * Interrupts must be disabled before setting the state to OFF.
1150 * Otherwise, the interrupt handler might be called and exit without
1151 * reading the interrupt status.
1153 wlcore_disable_interrupts(wl);
1154 mutex_lock(&wl->mutex);
1156 mutex_unlock(&wl->mutex);
1159 * This will not necessarily enable interrupts as interrupts
1160 * may have been disabled when op_stop was called. It will,
1161 * however, balance the above call to disable_interrupts().
1163 wlcore_enable_interrupts(wl);
/* Not in PLT: nothing to stop. */
1165 wl1271_error("cannot power down because not in PLT "
1166 "state: %d", wl->state);
1171 mutex_unlock(&wl->mutex);
/* Flush pending work outside the mutex to avoid deadlocks. */
1173 wl1271_flush_deferred_work(wl);
1174 cancel_work_sync(&wl->netstack_work);
1175 cancel_work_sync(&wl->recovery_work);
1176 cancel_delayed_work_sync(&wl->elp_work);
1177 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1179 mutex_lock(&wl->mutex);
1180 wl1271_power_off(wl);
1182 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1183 wl->state = WLCORE_STATE_OFF;
1185 wl->plt_mode = PLT_OFF;
1187 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback: map the skb to a link (hlid) and AC queue,
 * enqueue it on the per-link queue and schedule the tx work. Drops
 * the frame if it has no vif, an invalid link or a stopped queue.
 */
1193 static void wl1271_op_tx(struct ieee80211_hw *hw,
1194 struct ieee80211_tx_control *control,
1195 struct sk_buff *skb)
1197 struct wl1271 *wl = hw->priv;
1198 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1199 struct ieee80211_vif *vif = info->control.vif;
1200 struct wl12xx_vif *wlvif = NULL;
1201 unsigned long flags;
/* Frames without a vif cannot be mapped to a link - drop them. */
1206 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1207 ieee80211_free_txskb(hw, skb);
1211 wlvif = wl12xx_vif_to_data(vif);
1212 mapping = skb_get_queue_mapping(skb);
1213 q = wl1271_tx_get_queue(mapping);
1215 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1217 spin_lock_irqsave(&wl->wl_lock, flags);
1220 * drop the packet if the link is invalid or the queue is stopped
1221 * for any reason but watermark. Watermark is a "soft"-stop so we
1222 * allow these packets through.
1224 if (hlid == WL12XX_INVALID_LINK_ID ||
1225 (!test_bit(hlid, wlvif->links_map)) ||
1226 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1227 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1228 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1229 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1230 ieee80211_free_txskb(hw, skb);
1234 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
/* Enqueue and account the frame per device, per vif and per queue. */
1236 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1238 wl->tx_queue_count[q]++;
1239 wlvif->tx_queue_count[q]++;
1242 * The workqueue is slow to process the tx_queue and we need stop
1243 * the queue here, otherwise the queue will get too long.
1245 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1246 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1247 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1248 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1249 wlcore_stop_queue_locked(wl, wlvif, q,
1250 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1254 * The chip specific setup must run before the first TX packet -
1255 * before that, the tx_work will not be initialized!
/* Schedule tx work unless the FW is busy or the IRQ path handles it. */
1258 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1259 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1260 ieee80211_queue_work(wl->hw, &wl->tx_work);
1263 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low
 * on rx memory blocks; send it immediately if the FW Tx path is idle.
 */
1266 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1268 unsigned long flags;
1271 /* no need to queue a new dummy packet if one is already pending */
1272 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1275 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1277 spin_lock_irqsave(&wl->wl_lock, flags);
1278 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1279 wl->tx_queue_count[q]++;
1280 spin_unlock_irqrestore(&wl->wl_lock, flags);
1282 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1283 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1284 return wlcore_tx_work_locked(wl);
1287 * If the FW TX is busy, TX work will be scheduled by the threaded
1288 * interrupt handler function
1294 * The size of the dummy packet should be at least 1400 bytes. However, in
1295 * order to minimize the number of bus transactions, aligning it to 512 bytes
1296 * boundaries could be beneficial, performance wise
1298 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet skb: a zeroed QoS-less null-data frame padded
 * to TOTAL_TX_DUMMY_PACKET_SIZE, with headroom reserved for the tx
 * hardware descriptor. Returns NULL on allocation failure.
 */
1300 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1302 struct sk_buff *skb;
1303 struct ieee80211_hdr_3addr *hdr;
1304 unsigned int dummy_packet_size;
/* Payload size once the hw descriptor and 802.11 header are counted. */
1306 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1307 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1309 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1311 wl1271_warning("Failed to allocate a dummy packet skb");
/* Leave room for the tx hw descriptor the driver prepends. */
1315 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1317 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1318 memset(hdr, 0, sizeof(*hdr));
1319 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1320 IEEE80211_STYPE_NULLFUNC |
1321 IEEE80211_FCTL_TODS);
/* Zero-filled payload. */
1323 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1325 /* Dummy packets require the TID to be management */
1326 skb->priority = WL1271_TID_MGMT;
1328 /* Initialize all fields that might be used */
1329 skb_set_queue_mapping(skb, 0);
1330 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a WoWLAN packet pattern against FW limits: the pattern's
 * masked segments are counted as FW "fields", and both the field count
 * and the total encoded field size must fit the FW constraints.
 */
1338 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1340 int num_fields = 0, in_field = 0, fields_size = 0;
1341 int i, pattern_len = 0;
/* A pattern without a mask cannot be translated into fields. */
1344 wl1271_warning("No mask in WoWLAN pattern");
1349 * The pattern is broken up into segments of bytes at different offsets
1350 * that need to be checked by the FW filter. Each segment is called
1351 * a field in the FW API. We verify that the total number of fields
1352 * required for this pattern won't exceed FW limits (8)
1353 * as well as the total fields buffer won't exceed the FW limit.
1354 * Note that if there's a pattern which crosses Ethernet/IP header
1355 * boundary a new field is required.
1357 for (i = 0; i < p->pattern_len; i++) {
1358 if (test_bit(i, (unsigned long *)p->mask)) {
/* Crossing the Ethernet header boundary forces a new field. */
1363 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1365 fields_size += pattern_len +
1366 RX_FILTER_FIELD_OVERHEAD;
/* Masked run ended: close the current field. */
1374 fields_size += pattern_len +
1375 RX_FILTER_FIELD_OVERHEAD;
/* Account for a field still open at the end of the pattern. */
1382 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1386 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1387 wl1271_warning("RX Filter too complex. Too many segments");
1391 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1392 wl1271_warning("RX filter pattern is too big");
1399 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1401 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Release an RX filter and the per-field pattern buffers it owns.
 * NOTE(review): the NULL-filter guard and the final kfree(filter) are
 * elided in this extract — presumably present in the full source; code
 * left byte-identical.
 */
1404 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1411 for (i = 0; i < filter->num_fields; i++)
1412 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset + flags + byte pattern) to an RX filter.
 * The pattern bytes are copied into a freshly allocated buffer owned by
 * the filter; fails when the per-filter field limit is already reached
 * or the allocation fails.
 */
1417 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1418 u16 offset, u8 flags,
1419 u8 *pattern, u8 len)
1421 struct wl12xx_rx_filter_field *field;
1423 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1424 wl1271_warning("Max fields per RX filter. can't alloc another");
1428 field = &filter->fields[filter->num_fields];
1430 field->pattern = kzalloc(len, GFP_KERNEL);
1431 if (!field->pattern) {
1432 wl1271_warning("Failed to allocate RX filter pattern");
/* Only count the field once its pattern buffer exists */
1436 filter->num_fields++;
/* offset is stored little-endian, as expected by the FW */
1438 field->offset = cpu_to_le16(offset);
1439 field->flags = flags;
1441 memcpy(field->pattern, pattern, len);
/*
 * Compute the flattened (wire-format) size of all fields in a filter:
 * per field, the struct size minus the host pattern pointer plus the
 * actual pattern length.
 * NOTE(review): the trailing "sizeof(u8 *);" term and return are elided
 * in this extract; code left byte-identical.
 */
1446 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1448 int i, fields_size = 0;
1450 for (i = 0; i < filter->num_fields; i++)
1451 fields_size += filter->fields[i].len +
1452 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize a filter's fields into a caller-provided buffer in the FW
 * wire format: fixed header (struct minus the host pattern pointer)
 * immediately followed by the raw pattern bytes, fields back to back.
 * The caller sizes the buffer via wl1271_rx_filter_get_fields_size().
 */
1458 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1462 struct wl12xx_rx_filter_field *field;
1464 for (i = 0; i < filter->num_fields; i++) {
1465 field = (struct wl12xx_rx_filter_field *)buf;
1467 field->offset = filter->fields[i].offset;
1468 field->flags = filter->fields[i].flags;
1469 field->len = filter->fields[i].len;
/* Pattern bytes overwrite the pointer slot and spill past the struct */
1471 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1472 buf += sizeof(struct wl12xx_rx_filter_field) -
1473 sizeof(u8 *) + field->len;
1478 * Allocates an RX filter returned through f
1479 * which needs to be freed using rx_filter_free()
/*
 * Walk the WoWLAN pattern's mask, and for each run of consecutive masked
 * bytes emit one filter field. Runs are split at the Ethernet/IP header
 * boundary, and the field offset is rebased accordingly (Ethernet-header
 * vs IP-header relative). On any failure the partial filter is freed.
 * NOTE(review): loop increments and several error branches are elided in
 * this extract; code left byte-identical.
 */
1482 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1483 struct wl12xx_rx_filter **f)
1486 struct wl12xx_rx_filter *filter;
1490 filter = wl1271_rx_filter_alloc();
1492 wl1271_warning("Failed to alloc rx filter");
1498 while (i < p->pattern_len) {
/* Skip unmasked bytes — they are "don't care" for the FW */
1499 if (!test_bit(i, (unsigned long *)p->mask)) {
/* Find the end of the current run of masked bytes */
1504 for (j = i; j < p->pattern_len; j++) {
1505 if (!test_bit(j, (unsigned long *)p->mask))
/* Force a field break at the Ethernet/IP header boundary */
1508 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1509 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1513 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1515 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1517 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1518 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1523 ret = wl1271_rx_filter_alloc_field(filter,
1526 &p->pattern[i], len);
/* Matching packets should wake the host */
1533 filter->action = FILTER_SIGNAL;
1539 wl1271_rx_filter_free(filter);
/*
 * Program WoWLAN wake-up filtering into the FW.
 * With no patterns (or wow->any) just restore default-signal filtering
 * and clear all FW filters. Otherwise: validate every pattern first,
 * reset FW filter state, translate each pattern into an RX filter and
 * enable it per-slot, then set the default action to drop so only
 * matching packets wake the host.
 */
1545 static int wl1271_configure_wowlan(struct wl1271 *wl,
1546 struct cfg80211_wowlan *wow)
1550 if (!wow || wow->any || !wow->n_patterns) {
1551 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1556 ret = wl1271_rx_filter_clear_all(wl);
1563 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1566 /* Validate all incoming patterns before clearing current FW state */
1567 for (i = 0; i < wow->n_patterns; i++) {
1568 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1570 wl1271_warning("Bad wowlan pattern %d", i);
1575 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1579 ret = wl1271_rx_filter_clear_all(wl);
1583 /* Translate WoWLAN patterns into filters */
1584 for (i = 0; i < wow->n_patterns; i++) {
1585 struct cfg80211_pkt_pattern *p;
1586 struct wl12xx_rx_filter *filter = NULL;
1588 p = &wow->patterns[i];
1590 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1592 wl1271_warning("Failed to create an RX filter from "
1593 "wowlan pattern %d", i);
1597 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* The FW keeps its own copy; the host-side filter is no longer needed */
1599 wl1271_rx_filter_free(filter);
1604 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif (no-op unless associated):
 * wake the chip from ELP, install WoWLAN filters, and apply the
 * suspend-specific wake-up event / listen interval when they differ
 * from the normal runtime settings.
 */
1610 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1611 struct wl12xx_vif *wlvif,
1612 struct cfg80211_wowlan *wow)
1616 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1619 ret = wl1271_ps_elp_wakeup(wl);
1623 ret = wl1271_configure_wowlan(wl, wow);
/* Skip the ACX if suspend conditions equal the normal ones */
1627 if ((wl->conf.conn.suspend_wake_up_event ==
1628 wl->conf.conn.wake_up_event) &&
1629 (wl->conf.conn.suspend_listen_interval ==
1630 wl->conf.conn.listen_interval))
1633 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1634 wl->conf.conn.suspend_wake_up_event,
1635 wl->conf.conn.suspend_listen_interval);
1638 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1641 wl1271_ps_elp_sleep(wl);
/*
 * Suspend-time configuration for an AP vif (no-op unless started):
 * wake the chip and turn on FW beacon filtering for the duration of
 * the suspend.
 */
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 struct wl12xx_vif *wlvif)
1652 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1655 ret = wl1271_ps_elp_wakeup(wl);
1659 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1661 wl1271_ps_elp_sleep(wl);
/*
 * Dispatch per-vif suspend configuration by BSS type; other vif types
 * need no suspend handling (the fall-through return is elided in this
 * extract).
 */
1667 static int wl1271_configure_suspend(struct wl1271 *wl,
1668 struct wl12xx_vif *wlvif,
1669 struct cfg80211_wowlan *wow)
1671 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1672 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1673 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1674 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo suspend-time configuration on resume: clear WoWLAN filters and
 * restore normal wake-up conditions for an associated STA, or disable
 * beacon filtering for an AP. Errors are logged but not propagated.
 */
1678 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1681 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1682 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1684 if ((!is_ap) && (!is_sta))
1687 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1690 ret = wl1271_ps_elp_wakeup(wl);
/* Remove WoWLAN filtering now that the host is awake again */
1695 wl1271_configure_wowlan(wl, NULL);
/* Skip the ACX if suspend conditions equalled the normal ones */
1697 if ((wl->conf.conn.suspend_wake_up_event ==
1698 wl->conf.conn.wake_up_event) &&
1699 (wl->conf.conn.suspend_listen_interval ==
1700 wl->conf.conn.listen_interval))
1703 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1704 wl->conf.conn.wake_up_event,
1705 wl->conf.conn.listen_interval);
1708 wl1271_error("resume: wake up conditions failed: %d",
1712 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1716 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback: configure every vif for WoWLAN suspend,
 * then quiesce the driver — flush TX, bounce interrupts to drain the
 * threaded IRQ, mark the device suspended, and flush/cancel deferred
 * work so nothing touches the HW while the host sleeps.
 */
1719 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1720 struct cfg80211_wowlan *wow)
1722 struct wl1271 *wl = hw->priv;
1723 struct wl12xx_vif *wlvif;
1726 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1729 /* we want to perform the recovery before suspending */
1730 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1731 wl1271_warning("postponing suspend to perform recovery");
1735 wl1271_tx_flush(wl);
1737 mutex_lock(&wl->mutex);
1738 wl->wow_enabled = true;
1739 wl12xx_for_each_wlvif(wl, wlvif) {
1740 ret = wl1271_configure_suspend(wl, wlvif, wow);
1742 mutex_unlock(&wl->mutex);
1743 wl1271_warning("couldn't prepare device to suspend");
1747 mutex_unlock(&wl->mutex);
1748 /* flush any remaining work */
1749 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1752 * disable and re-enable interrupts in order to flush
1755 wlcore_disable_interrupts(wl);
1758 * set suspended flag to avoid triggering a new threaded_irq
1759 * work. no need for spinlock as interrupts are disabled.
1761 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1763 wlcore_enable_interrupts(wl);
1764 flush_work(&wl->tx_work);
1765 flush_delayed_work(&wl->elp_work);
1768 * Cancel the watchdog even if above tx_flush failed. We will detect
1769 * it on resume anyway.
1771 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback: clear the suspended flag under the spinlock,
 * run any IRQ work that was postponed while suspended (unless a recovery
 * is pending, in which case queue the recovery instead), restore per-vif
 * runtime configuration, and arm the TX watchdog re-init flag so a quiet
 * TX path right after resume does not trigger a spurious recovery.
 */
1776 static int wl1271_op_resume(struct ieee80211_hw *hw)
1778 struct wl1271 *wl = hw->priv;
1779 struct wl12xx_vif *wlvif;
1780 unsigned long flags;
1781 bool run_irq_work = false, pending_recovery;
1784 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1786 WARN_ON(!wl->wow_enabled);
1789 * re-enable irq_work enqueuing, and call irq_work directly if
1790 * there is a pending work.
1792 spin_lock_irqsave(&wl->wl_lock, flags);
1793 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1794 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1795 run_irq_work = true;
1796 spin_unlock_irqrestore(&wl->wl_lock, flags);
1798 mutex_lock(&wl->mutex);
1800 /* test the recovery flag before calling any SDIO functions */
1801 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1805 wl1271_debug(DEBUG_MAC80211,
1806 "run postponed irq_work directly");
1808 /* don't talk to the HW if recovery is pending */
1809 if (!pending_recovery) {
1810 ret = wlcore_irq_locked(wl);
1812 wl12xx_queue_recovery_work(wl);
1815 wlcore_enable_interrupts(wl);
1818 if (pending_recovery) {
1819 wl1271_warning("queuing forgotten recovery on resume");
1820 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1824 wl12xx_for_each_wlvif(wl, wlvif) {
1825 wl1271_configure_resume(wl, wlvif);
1829 wl->wow_enabled = false;
1832 * Set a flag to re-init the watchdog on the first Tx after resume.
1833 * That way we avoid possible conditions where Tx-complete interrupts
1834 * fail to arrive and we perform a spurious recovery.
1836 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1837 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback — intentionally does nothing beyond logging;
 * see the comment below for why boot is deferred to add_interface.
 * (The final "return 0;" is elided in this extract.)
 */
1843 static int wl1271_op_start(struct ieee80211_hw *hw)
1845 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1848 * We have to delay the booting of the hardware because
1849 * we need to know the local MAC address before downloading and
1850 * initializing the firmware. The MAC address cannot be changed
1851 * after boot, and without the proper MAC address, the firmware
1852 * will not function properly.
1854 * The MAC address is first known when the corresponding interface
1855 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held on entry and exit.
 * Order is critical: set state OFF first (so work functions bail out),
 * disable interrupts without syncing (we hold the mutex), then DROP the
 * mutex to synchronize IRQs and cancel/flush all deferred work, retake
 * it, reset TX, power off, and reinitialize all driver state fields.
 * Do not reorder statements here — comments only, code untouched.
 */
1861 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* Already off: just rebalance the interrupt-disable done by recovery */
1865 if (wl->state == WLCORE_STATE_OFF) {
1866 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1868 wlcore_enable_interrupts(wl);
1874 * this must be before the cancel_work calls below, so that the work
1875 * functions don't perform further work.
1877 wl->state = WLCORE_STATE_OFF;
1880 * Use the nosync variant to disable interrupts, so the mutex could be
1881 * held while doing so without deadlocking.
1883 wlcore_disable_interrupts_nosync(wl);
/* Drop the mutex: the syncs/cancels below may sleep on work items */
1885 mutex_unlock(&wl->mutex);
1887 wlcore_synchronize_interrupts(wl);
1888 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1889 cancel_work_sync(&wl->recovery_work);
1890 wl1271_flush_deferred_work(wl);
1891 cancel_delayed_work_sync(&wl->scan_complete_work);
1892 cancel_work_sync(&wl->netstack_work);
1893 cancel_work_sync(&wl->tx_work);
1894 cancel_delayed_work_sync(&wl->elp_work);
1895 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1897 /* let's notify MAC80211 about the remaining pending TX frames */
1898 mutex_lock(&wl->mutex);
1899 wl12xx_tx_reset(wl);
1901 wl1271_power_off(wl);
1903 * In case a recovery was scheduled, interrupts were disabled to avoid
1904 * an interrupt storm. Now that the power is down, it is safe to
1905 * re-enable interrupts to balance the disable depth
1907 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1908 wlcore_enable_interrupts(wl);
/* Reset driver state to post-probe defaults for the next start */
1910 wl->band = IEEE80211_BAND_2GHZ;
1913 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1914 wl->channel_type = NL80211_CHAN_NO_HT;
1915 wl->tx_blocks_available = 0;
1916 wl->tx_allocated_blocks = 0;
1917 wl->tx_results_count = 0;
1918 wl->tx_packets_count = 0;
1919 wl->time_offset = 0;
1920 wl->ap_fw_ps_map = 0;
1922 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1923 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1924 memset(wl->links_map, 0, sizeof(wl->links_map));
1925 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1926 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1927 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1928 wl->active_sta_count = 0;
1929 wl->active_link_count = 0;
1931 /* The system link is always allocated */
1932 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1933 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1934 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1937 * this is performed after the cancel_work calls and the associated
1938 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1939 * get executed before all these vars have been reset.
1943 wl->tx_blocks_freed = 0;
1945 for (i = 0; i < NUM_TX_QUEUES; i++) {
1946 wl->tx_pkts_freed[i] = 0;
1947 wl->tx_allocated_pkts[i] = 0;
1950 wl1271_debugfs_reset(wl);
/* Free FW-status/result buffers; NULL the pointers to avoid dangling use */
1952 kfree(wl->raw_fw_status);
1953 wl->raw_fw_status = NULL;
1954 kfree(wl->fw_status);
1955 wl->fw_status = NULL;
1956 kfree(wl->tx_res_if);
1957 wl->tx_res_if = NULL;
1958 kfree(wl->target_mem_map);
1959 wl->target_mem_map = NULL;
1962 * FW channels must be re-calibrated after recovery,
1963 * save current Reg-Domain channel configuration and clear it.
1965 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1966 sizeof(wl->reg_ch_conf_pending));
1967 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1970 static void wlcore_op_stop(struct ieee80211_hw *hw)
1972 struct wl1271 *wl = hw->priv;
1974 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1976 mutex_lock(&wl->mutex);
1978 wlcore_op_stop_locked(wl);
1980 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report the failed switch to mac80211 and tell the FW to abort it.
 */
1983 static void wlcore_channel_switch_work(struct work_struct *work)
1985 struct delayed_work *dwork;
1987 struct ieee80211_vif *vif;
1988 struct wl12xx_vif *wlvif;
1991 dwork = container_of(work, struct delayed_work, work);
1992 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1995 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1997 mutex_lock(&wl->mutex);
1999 if (unlikely(wl->state != WLCORE_STATE_ON))
2002 /* check the channel switch is still ongoing */
2003 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2006 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = switch did not succeed */
2007 ieee80211_chswitch_done(vif, false);
2009 ret = wl1271_ps_elp_wakeup(wl);
2013 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2015 wl1271_ps_elp_sleep(wl);
2017 mutex_unlock(&wl->mutex);
/*
 * Delayed work that escalates a FW-detected beacon loss into a mac80211
 * connection-loss notification, provided the vif is still associated
 * and the device is still on.
 */
2020 static void wlcore_connection_loss_work(struct work_struct *work)
2022 struct delayed_work *dwork;
2024 struct ieee80211_vif *vif;
2025 struct wl12xx_vif *wlvif;
2027 dwork = container_of(work, struct delayed_work, work);
2028 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2031 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2033 mutex_lock(&wl->mutex);
2035 if (unlikely(wl->state != WLCORE_STATE_ON))
2038 /* Call mac80211 connection loss */
2039 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2042 vif = wl12xx_wlvif_to_vif(wlvif);
2043 ieee80211_connection_loss(vif);
2045 mutex_unlock(&wl->mutex);
/*
 * Delayed work run after the pending-auth ROC timeout: if no fresh auth
 * reply arrived in the meantime, wake the chip and cancel the remain-on-
 * channel kept open for the authenticating station.
 */
2048 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2050 struct delayed_work *dwork;
2052 struct wl12xx_vif *wlvif;
2053 unsigned long time_spare;
2056 dwork = container_of(work, struct delayed_work, work);
2057 wlvif = container_of(dwork, struct wl12xx_vif,
2058 pending_auth_complete_work);
2061 mutex_lock(&wl->mutex);
2063 if (unlikely(wl->state != WLCORE_STATE_ON))
2067 * Make sure a second really passed since the last auth reply. Maybe
2068 * a second auth reply arrived while we were stuck on the mutex.
2069 * Check for a little less than the timeout to protect from scheduler
2072 time_spare = jiffies +
2073 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2074 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2077 ret = wl1271_ps_elp_wakeup(wl);
2081 /* cancel the ROC if active */
2082 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2084 wl1271_ps_elp_sleep(wl);
2086 mutex_unlock(&wl->mutex);
/*
 * Claim the first free slot in the rate-policies bitmap; the slot index
 * is returned through *idx (assignment elided in this extract). Fails
 * when all WL12XX_MAX_RATE_POLICIES slots are taken.
 */
2089 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2091 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2092 WL12XX_MAX_RATE_POLICIES);
2093 if (policy >= WL12XX_MAX_RATE_POLICIES)
2096 __set_bit(policy, wl->rate_policies_map);
/*
 * Return a rate-policy slot to the bitmap and poison *idx with the
 * out-of-range sentinel so double-free is caught by the WARN_ON.
 */
2101 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2103 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2106 __clear_bit(*idx, wl->rate_policies_map);
2107 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free KLV (keep-alive) template slot; mirrors
 * wl12xx_allocate_rate_policy() but on the klv_templates_map bitmap.
 */
2110 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2112 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2113 WLCORE_MAX_KLV_TEMPLATES);
2114 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2117 __set_bit(policy, wl->klv_templates_map);
/*
 * Return a KLV template slot to the bitmap and poison *idx with the
 * out-of-range sentinel so a stale index trips the WARN_ON.
 */
2122 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2124 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2127 __clear_bit(*idx, wl->klv_templates_map);
2128 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (plus its P2P flavor — the p2p condition lines
 * are elided in this extract) to the FW role constant, or
 * WL12XX_INVALID_ROLE_TYPE for an unknown type.
 */
2131 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2133 switch (wlvif->bss_type) {
2134 case BSS_TYPE_AP_BSS:
2136 return WL1271_ROLE_P2P_GO;
2138 return WL1271_ROLE_AP;
2140 case BSS_TYPE_STA_BSS:
2142 return WL1271_ROLE_P2P_CL;
2144 return WL1271_ROLE_STA;
2147 return WL1271_ROLE_IBSS;
2150 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2152 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver state for a newly added interface:
 * derive bss_type from the mac80211 iftype, allocate rate policies and
 * (for STA/IBSS) a keep-alive template, seed rates/band/channel from
 * the global wl state, and set up the vif's work items and timer.
 */
2155 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2157 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2160 /* clear everything but the persistent data */
2161 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2163 switch (ieee80211_vif_type_p2p(vif)) {
2164 case NL80211_IFTYPE_P2P_CLIENT:
2167 case NL80211_IFTYPE_STATION:
2168 wlvif->bss_type = BSS_TYPE_STA_BSS;
2170 case NL80211_IFTYPE_ADHOC:
2171 wlvif->bss_type = BSS_TYPE_IBSS;
2173 case NL80211_IFTYPE_P2P_GO:
2176 case NL80211_IFTYPE_AP:
2177 wlvif->bss_type = BSS_TYPE_AP_BSS;
2180 wlvif->bss_type = MAX_BSS_TYPE;
/* No role/link assigned yet — filled in by role_enable later */
2184 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2185 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2186 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2188 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2189 wlvif->bss_type == BSS_TYPE_IBSS) {
2190 /* init sta/ibss data */
2191 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2192 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2193 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2194 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2195 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2196 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2197 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2198 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path (the else line is elided in this extract) */
2201 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2202 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2203 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2204 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2205 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2206 wl12xx_allocate_rate_policy(wl,
2207 &wlvif->ap.ucast_rate_idx[i]);
2208 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2210 * TODO: check if basic_rate shouldn't be
2211 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2212 * instead (the same thing for STA above).
2214 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2215 /* TODO: this seems to be used only for STA, check it */
2216 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2219 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2220 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2221 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2224 * mac80211 configures some values globally, while we treat them
2225 * per-interface. thus, on init, we have to copy them from wl
2227 wlvif->band = wl->band;
2228 wlvif->channel = wl->channel;
2229 wlvif->power_level = wl->power_level;
2230 wlvif->channel_type = wl->channel_type;
2232 INIT_WORK(&wlvif->rx_streaming_enable_work,
2233 wl1271_rx_streaming_enable_work);
2234 INIT_WORK(&wlvif->rx_streaming_disable_work,
2235 wl1271_rx_streaming_disable_work);
2236 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2237 wlcore_channel_switch_work);
2238 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2239 wlcore_connection_loss_work);
2240 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2241 wlcore_pending_auth_complete_work);
2242 INIT_LIST_HEAD(&wlvif->list);
2244 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2245 (unsigned long) wlvif);
/*
 * Boot the firmware with up to WL1271_BOOT_RETRIES attempts (the retry
 * loop lines are elided in this extract): wake the chip, run the
 * chip-specific boot op and HW init; on failure drain IRQs and power
 * off before retrying. On success publish the FW version to wiphy,
 * disable 5GHz channels if 11a is unsupported, and mark the device ON.
 */
2249 static int wl12xx_init_fw(struct wl1271 *wl)
2251 int retries = WL1271_BOOT_RETRIES;
2252 bool booted = false;
2253 struct wiphy *wiphy = wl->hw->wiphy;
2258 ret = wl12xx_chip_wakeup(wl, false);
2262 ret = wl->ops->boot(wl);
2266 ret = wl1271_hw_init(wl);
2274 mutex_unlock(&wl->mutex);
2275 /* Unlocking the mutex in the middle of handling is
2276 inherently unsafe. In this case we deem it safe to do,
2277 because we need to let any possibly pending IRQ out of
2278 the system (and while we are WLCORE_STATE_OFF the IRQ
2279 work function will not do anything.) Also, any other
2280 possible concurrent operations will fail due to the
2281 current state, hence the wl1271 struct should be safe. */
2282 wlcore_disable_interrupts(wl);
2283 wl1271_flush_deferred_work(wl);
2284 cancel_work_sync(&wl->netstack_work);
2285 mutex_lock(&wl->mutex);
2287 wl1271_power_off(wl);
2291 wl1271_error("firmware boot failed despite %d retries",
2292 WL1271_BOOT_RETRIES);
2296 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2298 /* update hw/fw version info in wiphy struct */
2299 wiphy->hw_version = wl->chip.id;
2300 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2301 sizeof(wiphy->fw_version));
2304 * Now we know if 11a is supported (info from the NVS), so disable
2305 * 11a channels if not supported
2307 if (!wl->enable_11a)
2308 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2310 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2311 wl->enable_11a ? "" : "not ");
2313 wl->state = WLCORE_STATE_ON;
2318 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2320 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2324 * Check whether a fw switch (i.e. moving from one loaded
2325 * fw to another) is needed. This function is also responsible
2326 * for updating wl->last_vif_count, so it must be called before
2327 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Decision: switch to multi-role FW when going above one vif, back to
 * single-role FW when at one or none — but never mid vif-change, when
 * the device is off, or when only a single FW image exists.
 */
2330 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2331 struct vif_counter_data vif_counter_data,
2334 enum wl12xx_fw_type current_fw = wl->fw_type;
2335 u8 vif_count = vif_counter_data.counter;
2337 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2340 /* increase the vif count if this is a new vif */
2341 if (add && !vif_counter_data.cur_vif_running)
2344 wl->last_vif_count = vif_count;
2346 /* no need for fw change if the device is OFF */
2347 if (wl->state == WLCORE_STATE_OFF)
2350 /* no need for fw change if a single fw is used */
2351 if (!wl->mr_fw_name)
2354 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2356 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2363 * Enter "forced psm". Make sure the sta is in psm against the ap,
2364 * to make the fw switch a bit more disconnection-persistent.
/* Applied to every STA vif before an intentional FW-switch recovery. */
2366 static void wl12xx_force_active_psm(struct wl1271 *wl)
2368 struct wl12xx_vif *wlvif;
2370 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2371 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Accumulator for the active-interface iteration below: a bitmap of
 * already-claimed hw-queue bases, the vif being added, and whether that
 * vif was seen among the running interfaces.
 */
2375 struct wlcore_hw_queue_iter_data {
2376 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2378 struct ieee80211_vif *vif;
2379 /* is the current vif among those iterated */
/*
 * Per-interface callback: mark each running vif's hw-queue base as taken,
 * except the vif currently being added (flagged as cur_running instead).
 */
2383 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2384 struct ieee80211_vif *vif)
2386 struct wlcore_hw_queue_iter_data *iter_data = data;
2388 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2391 if (iter_data->cur_running || vif == iter_data->vif) {
2392 iter_data->cur_running = true;
/* hw_queue[0] is a multiple of NUM_TX_QUEUES; map it to a base slot */
2396 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Pick a mac80211 hw-queue base for a vif: reuse the pre-allocated base
 * when the vif is already running (resume/recovery), otherwise claim the
 * first free base slot, register the per-AC queues with mac80211, and
 * map the CAB (content-after-beacon) queue for AP vifs.
 */
2399 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2400 struct wl12xx_vif *wlvif)
2402 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2403 struct wlcore_hw_queue_iter_data iter_data = {};
2406 iter_data.vif = vif;
2408 /* mark all bits taken by active interfaces */
2409 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2410 IEEE80211_IFACE_ITER_RESUME_ALL,
2411 wlcore_hw_queue_iter, &iter_data);
2413 /* the current vif is already running in mac80211 (resume/recovery) */
2414 if (iter_data.cur_running) {
2415 wlvif->hw_queue_base = vif->hw_queue[0];
2416 wl1271_debug(DEBUG_MAC80211,
2417 "using pre-allocated hw queue base %d",
2418 wlvif->hw_queue_base);
2420 /* interface type might have changed type */
2421 goto adjust_cab_queue;
2424 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2425 WLCORE_NUM_MAC_ADDRESSES);
2426 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2429 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2430 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2431 wlvif->hw_queue_base);
2433 for (i = 0; i < NUM_TX_QUEUES; i++) {
/* Fresh base: clear any stale stop reasons from a previous owner */
2434 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2435 /* register hw queues in mac80211 */
2436 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2440 /* the last places are reserved for cab queues per interface */
2441 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2442 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2443 wlvif->hw_queue_base / NUM_TX_QUEUES;
2445 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: initialize per-vif state, allocate a
 * role and hw queues, possibly trigger a FW switch (single<->multi role)
 * via an intended recovery, boot the FW on first interface (this is also
 * where the MAC address becomes known — see wl1271_op_start), and enable
 * the role in FW. Rejected in PLT mode and during an ongoing recovery.
 */
2450 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2451 struct ieee80211_vif *vif)
2453 struct wl1271 *wl = hw->priv;
2454 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2455 struct vif_counter_data vif_count;
2460 wl1271_error("Adding Interface not allowed while in PLT mode");
2464 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2465 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2467 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2468 ieee80211_vif_type_p2p(vif), vif->addr);
2470 wl12xx_get_vif_count(hw, vif, &vif_count);
2472 mutex_lock(&wl->mutex);
2473 ret = wl1271_ps_elp_wakeup(wl);
2478 * in some very corner case HW recovery scenarios its possible to
2479 * get here before __wl1271_op_remove_interface is complete, so
2480 * opt out if that is the case.
2482 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2483 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2489 ret = wl12xx_init_vif_data(wl, vif);
2494 role_type = wl12xx_get_role_type(wl, wlvif);
2495 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2500 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* Need a different FW image: force PSM and restart via recovery */
2504 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2505 wl12xx_force_active_psm(wl);
2506 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2507 mutex_unlock(&wl->mutex);
2508 wl1271_recovery_work(&wl->recovery_work);
2513 * TODO: after the nvs issue will be solved, move this block
2514 * to start(), and make sure here the driver is ON.
2516 if (wl->state == WLCORE_STATE_OFF) {
2518 * we still need this in order to configure the fw
2519 * while uploading the nvs
2521 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2523 ret = wl12xx_init_fw(wl);
2528 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2529 role_type, &wlvif->role_id);
2533 ret = wl1271_init_vif_specific(wl, vif);
2537 list_add(&wlvif->list, &wl->wlvif_list);
2538 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2540 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2545 wl1271_ps_elp_sleep(wl);
2547 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held on entry and exit (the mutex is
 * dropped near the end to cancel per-vif work items). Aborts an ongoing
 * scan on this vif, disables its FW roles (unless a recovery is in
 * progress), resets its TX state, frees its rate policies and links,
 * and — when the last AP goes down — reconfigures sleep auth for the
 * remaining stations. Comments only; statement order left untouched.
 */
2552 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2553 struct ieee80211_vif *vif,
2554 bool reset_tx_queues)
2556 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2558 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2560 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2562 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2565 /* because of hardware recovery, we may get here twice */
2566 if (wl->state == WLCORE_STATE_OFF)
2569 wl1271_info("down");
2571 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2572 wl->scan_wlvif == wlvif) {
2574 * Rearm the tx watchdog just before idling scan. This
2575 * prevents just-finished scans from triggering the watchdog
2577 wl12xx_rearm_tx_watchdog_locked(wl);
2579 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2580 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2581 wl->scan_wlvif = NULL;
2582 wl->scan.req = NULL;
/* true = scan was aborted */
2583 ieee80211_scan_completed(wl->hw, true);
2586 if (wl->sched_vif == wlvif)
2587 wl->sched_vif = NULL;
2589 if (wl->roc_vif == vif) {
2591 ieee80211_remain_on_channel_expired(wl->hw);
/* Skip FW commands entirely while a recovery is in progress */
2594 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2595 /* disable active roles */
2596 ret = wl1271_ps_elp_wakeup(wl);
2600 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2601 wlvif->bss_type == BSS_TYPE_IBSS) {
2602 if (wl12xx_dev_role_started(wlvif))
2603 wl12xx_stop_dev(wl, wlvif);
2606 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2610 wl1271_ps_elp_sleep(wl);
2613 wl12xx_tx_reset_wlvif(wl, wlvif);
2615 /* clear all hlids (except system_hlid) */
2616 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2618 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2619 wlvif->bss_type == BSS_TYPE_IBSS) {
2620 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2621 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2622 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2623 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2624 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2626 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2627 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2628 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2629 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2630 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2631 wl12xx_free_rate_policy(wl,
2632 &wlvif->ap.ucast_rate_idx[i]);
2633 wl1271_free_ap_keys(wl, wlvif);
2636 dev_kfree_skb(wlvif->probereq);
2637 wlvif->probereq = NULL;
2638 if (wl->last_wlvif == wlvif)
2639 wl->last_wlvif = NULL;
2640 list_del(&wlvif->list);
2641 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2642 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2643 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2651 * Last AP, have more stations. Configure sleep auth according to STA.
2652 * Don't do thin on unintended recovery.
2654 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2655 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2658 if (wl->ap_count == 0 && is_ap) {
2659 /* mask ap events */
2660 wl->event_mask &= ~wl->ap_event_mask;
2661 wl1271_event_unmask(wl);
2664 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2665 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2666 /* Configure for power according to debugfs */
2667 if (sta_auth != WL1271_PSM_ILLEGAL)
2668 wl1271_acx_sleep_auth(wl, sta_auth);
2669 /* Configure for ELP power saving */
2671 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* Drop the mutex: the timer/work cancels below may sleep */
2675 mutex_unlock(&wl->mutex);
2677 del_timer_sync(&wlvif->rx_streaming_timer);
2678 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2679 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2680 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2681 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2682 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2684 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback: verify the vif is still tracked
 * in wl->wlvif_list (it may already be gone if a recovery raced the
 * removal), run the locked teardown, and switch back to the single-role
 * FW via an intended recovery when this was the second-to-last vif.
 */
2687 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2688 struct ieee80211_vif *vif)
2690 struct wl1271 *wl = hw->priv;
2691 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2692 struct wl12xx_vif *iter;
2693 struct vif_counter_data vif_count;
2695 wl12xx_get_vif_count(hw, vif, &vif_count);
2696 mutex_lock(&wl->mutex);
2698 if (wl->state == WLCORE_STATE_OFF ||
2699 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2703 * wl->vif can be null here if someone shuts down the interface
2704 * just when hardware recovery has been started.
2706 wl12xx_for_each_wlvif(wl, iter) {
2710 __wl1271_op_remove_interface(wl, vif, true);
2713 WARN_ON(iter != wlvif);
2714 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2715 wl12xx_force_active_psm(wl);
2716 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2717 wl12xx_queue_recovery_work(wl);
2720 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface handler: implemented as remove + re-add of the
 * same vif with the new nl80211 type.  The VIF_CHANGE_IN_PROGRESS flag
 * brackets the window so other paths can tell this is not a real teardown.
 */
2723 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2724 struct ieee80211_vif *vif,
2725 enum nl80211_iftype new_type, bool p2p)
2727 struct wl1271 *wl = hw->priv;
2730 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2731 wl1271_op_remove_interface(hw, vif);
/* mutate the vif type in place, then bring it back up */
2733 vif->type = new_type;
2735 ret = wl1271_op_add_interface(hw, vif);
2737 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Start the STA or IBSS role for this vif (the "join" step).  Clears the
 * recorded encryption type since the JOIN wipes keys from the chipset.
 */
2741 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2744 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2747 * One of the side effects of the JOIN command is that it clears
2748 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2749 * to a WPA/WPA2 access point will therefore kill the data-path.
2750 * Currently the only valid scenario for JOIN during association
2751 * is on roaming, in which case we will also be given new keys.
2752 * Keep the below message for now, unless it starts bothering
2753 * users who really like to roam a lot :)
2755 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2756 wl1271_info("JOIN while associated.");
2758 /* clear encryption type */
2759 wlvif->encryption_type = KEY_NONE;
2762 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2764 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2766 * TODO: this is an ugly workaround for wl12xx fw
2767 * bug - we are not able to tx/rx after the first
2768 * start_sta, so make dummy start+stop calls,
2769 * and then call start_sta again.
2770 * this should be fixed in the fw.
2772 wl12xx_cmd_role_start_sta(wl, wlvif);
2773 wl12xx_cmd_role_stop_sta(wl, wlvif);
2776 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from @skb (IEs start at @offset) and cache it in
 * wlvif->ssid / wlvif->ssid_len.  Errors out if the IE is missing or the
 * SSID exceeds IEEE80211_MAX_SSID_LEN.
 */
2782 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2786 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2790 wl1271_error("No SSID in IEs!");
2795 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2796 wl1271_error("SSID is too long!");
2800 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id + length bytes) */
2801 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Refresh the cached SSID for a STA vif from the AP probe request that
 * mac80211 built for this connection.
 */
2805 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2807 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2808 struct sk_buff *skb;
2811 /* we currently only support setting the ssid from the ap probe req */
2812 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2815 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2819 ieoffset = offsetof(struct ieee80211_mgmt,
2820 u.probe_req.variable);
2821 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply association state to the firmware: cache AID/beacon interval/QoS,
 * rebuild templates (ps-poll, probe-req, klv null-data), enable connection
 * monitoring and keep-alive, and sync the PSM mode with mac80211's default.
 * Command ordering below is firmware-mandated (see keep-alive comment).
 */
2827 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2828 struct ieee80211_bss_conf *bss_conf,
2834 wlvif->aid = bss_conf->aid;
2835 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2836 wlvif->beacon_int = bss_conf->beacon_int;
2837 wlvif->wmm_enabled = bss_conf->qos;
2839 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2842 * with wl1271, we don't need to update the
2843 * beacon_int and dtim_period, because the firmware
2844 * updates it by itself when the first beacon is
2845 * received after a join.
2847 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2852 * Get a template for hardware connection maintenance
2854 dev_kfree_skb(wlvif->probereq);
2855 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2858 ieoffset = offsetof(struct ieee80211_mgmt,
2859 u.probe_req.variable);
2860 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2862 /* enable the connection monitoring feature */
2863 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2868 * The join command disables the keep-alive mode, shuts down its process,
2869 * and also clears the template config, so we need to reset it all after
2870 * the join. The acx_aid starts the keep-alive process, and the order
2871 * of the commands below is relevant.
2873 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2877 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2881 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2885 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2886 wlvif->sta.klv_template_id,
2887 ACX_KEEP_ALIVE_TPL_VALID);
2892 * The default fw psm configuration is AUTO, while mac80211 default
2893 * setting is off (ACTIVE), so sync the fw with the correct value.
2895 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2901 wl1271_tx_enabled_rates_get(wl,
2904 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): drop the association/joined flags, free the
 * cached probe-request template, disable connection monitoring and
 * keep-alive, and abort any in-progress channel switch.
 */
2912 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2915 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2917 /* make sure we are connected (sta) joined */
2919 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2922 /* make sure we are joined (ibss) */
2924 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2928 /* use defaults when not associated */
2931 /* free probe-request template */
2932 dev_kfree_skb(wlvif->probereq);
2933 wlvif->probereq = NULL;
2935 /* disable connection monitor features */
2936 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2940 /* Disable the keep-alive feature */
2941 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* a channel switch may still be pending - tell fw and mac80211 it failed */
2946 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2947 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2949 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2950 ieee80211_chswitch_done(vif, false);
2951 cancel_delayed_work(&wlvif->channel_switch_work);
2954 /* invalidate keep-alive template */
2955 wl1271_acx_keep_alive_config(wl, wlvif,
2956 wlvif->sta.klv_template_id,
2957 ACX_KEEP_ALIVE_TPL_INVALID);
2962 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2964 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2965 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the mac80211 idle state of a STA vif via WLVIF_FLAG_ACTIVE.
 * Entering idle also stops any scheduled scan owned by this vif.
 */
2968 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2971 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/* no transition - nothing to do */
2973 if (idle == cur_idle)
2977 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2979 /* The current firmware only supports sched_scan in idle */
2980 if (wl->sched_vif == wlvif)
2981 wl->ops->sched_scan_stop(wl, wlvif);
2983 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif part of the .config handler: push a changed tx power level to
 * the firmware and cache it on the vif.
 */
2987 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2988 struct ieee80211_conf *conf, u32 changed)
2992 if (conf->power_level != wlvif->power_level) {
2993 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2997 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config handler: records the global power level and applies the
 * changed configuration to every vif, bracketed by ELP wakeup/sleep under
 * wl->mutex.
 */
3003 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3005 struct wl1271 *wl = hw->priv;
3006 struct wl12xx_vif *wlvif;
3007 struct ieee80211_conf *conf = &hw->conf;
3010 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3012 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3014 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3017 mutex_lock(&wl->mutex);
3019 if (changed & IEEE80211_CONF_CHANGE_POWER)
3020 wl->power_level = conf->power_level;
3022 if (unlikely(wl->state != WLCORE_STATE_ON))
3025 ret = wl1271_ps_elp_wakeup(wl);
3029 /* configure each interface */
3030 wl12xx_for_each_wlvif(wl, wlvif) {
3031 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3037 wl1271_ps_elp_sleep(wl);
3040 mutex_unlock(&wl->mutex);
/*
 * Snapshot of the multicast address list, built in GFP_ATOMIC context by
 * wl1271_op_prepare_multicast() and consumed (then freed) by
 * wl1271_op_configure_filter().  NOTE(review): fields between these two
 * lines (presumably an enable flag and a list-length counter, judging by
 * the fp->enabled / fp->mc_list_length uses below) are elided in this view.
 */
3045 struct wl1271_filter_params {
3048 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast handler: copy the multicast list into a
 * heap-allocated wl1271_filter_params and smuggle the pointer to
 * .configure_filter through the u64 return value.  If the list is larger
 * than the hw group table, hw multicast filtering is disabled instead.
 */
3051 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3052 struct netdev_hw_addr_list *mc_list)
3054 struct wl1271_filter_params *fp;
3055 struct netdev_hw_addr *ha;
/* atomic context - cannot sleep here */
3057 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3059 wl1271_error("Out of memory setting filters.");
3063 /* update multicast filtering parameters */
3064 fp->mc_list_length = 0;
3065 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3066 fp->enabled = false;
3069 netdev_hw_addr_list_for_each(ha, mc_list) {
3070 memcpy(fp->mc_list[fp->mc_list_length],
3071 ha->addr, ETH_ALEN);
3072 fp->mc_list_length++;
/* ownership of fp passes to wl1271_op_configure_filter() */
3076 return (u64)(unsigned long)fp;
/*
 * RX filter flags this driver can honour; .configure_filter masks both the
 * requested and changed flag sets with this.  NOTE(review): continuation
 * lines of this macro are elided in this view.
 */
3079 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3082 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter handler: programs the multicast group address
 * table for each non-AP vif from the wl1271_filter_params built in
 * .prepare_multicast (passed via @multicast).  Other filter bits are not
 * programmable on this fw (see comment near the end).
 */
3086 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3087 unsigned int changed,
3088 unsigned int *total, u64 multicast)
3090 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3091 struct wl1271 *wl = hw->priv;
3092 struct wl12xx_vif *wlvif;
3096 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3097 " total %x", changed, *total);
3099 mutex_lock(&wl->mutex);
/* advertise only the filters we can actually honour */
3101 *total &= WL1271_SUPPORTED_FILTERS;
3102 changed &= WL1271_SUPPORTED_FILTERS;
3104 if (unlikely(wl->state != WLCORE_STATE_ON))
3107 ret = wl1271_ps_elp_wakeup(wl);
3111 wl12xx_for_each_wlvif(wl, wlvif) {
3112 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* FIF_ALLMULTI: accept everything, no group table needed */
3113 if (*total & FIF_ALLMULTI)
3114 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3118 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3121 fp->mc_list_length);
3128 * the fw doesn't provide an api to configure the filters. instead,
3129 * the filters configuration is based on the active roles / ROC
3134 wl1271_ps_elp_sleep(wl);
3137 mutex_unlock(&wl->mutex);
/*
 * Stash an AP group/pairwise key in wlvif->ap.recorded_keys so it can be
 * programmed into the firmware later, once the AP role has started
 * (wl1271_ap_init_hwenc()).  Rejects oversized keys and duplicate ids.
 */
3141 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3142 u8 id, u8 key_type, u8 key_size,
3143 const u8 *key, u8 hlid, u32 tx_seq_32,
3146 struct wl1271_ap_key *ap_key;
3149 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3151 if (key_size > MAX_KEY_SIZE)
3155 * Find next free entry in ap_keys. Also check we are not replacing
3158 for (i = 0; i < MAX_NUM_KEYS; i++) {
3159 if (wlvif->ap.recorded_keys[i] == NULL)
3162 if (wlvif->ap.recorded_keys[i]->id == id) {
3163 wl1271_warning("trying to record key replacement");
/* table full - no free slot found */
3168 if (i == MAX_NUM_KEYS)
3171 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3176 ap_key->key_type = key_type;
3177 ap_key->key_size = key_size;
3178 memcpy(ap_key->key, key, key_size);
3179 ap_key->hlid = hlid;
3180 ap_key->tx_seq_32 = tx_seq_32;
3181 ap_key->tx_seq_16 = tx_seq_16;
/* slot i takes ownership; freed in wl1271_free_ap_keys() */
3183 wlvif->ap.recorded_keys[i] = ap_key;
3187 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3191 for (i = 0; i < MAX_NUM_KEYS; i++) {
3192 kfree(wlvif->ap.recorded_keys[i]);
3193 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Push all keys recorded before the AP role started into the firmware.
 * Keys without a specific hlid go to the broadcast link; if any WEP key was
 * programmed, the default WEP key index is set as well.  The recorded-key
 * table is freed at the end regardless of outcome.
 */
3197 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3200 struct wl1271_ap_key *key;
3201 bool wep_key_added = false;
3203 for (i = 0; i < MAX_NUM_KEYS; i++) {
3205 if (wlvif->ap.recorded_keys[i] == NULL)
3208 key = wlvif->ap.recorded_keys[i];
/* group keys recorded without an hlid target the broadcast link */
3210 if (hlid == WL12XX_INVALID_LINK_ID)
3211 hlid = wlvif->ap.bcast_hlid;
3213 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3214 key->id, key->key_type,
3215 key->key_size, key->key,
3216 hlid, key->tx_seq_32,
3221 if (key->key_type == KEY_WEP)
3222 wep_key_added = true;
3225 if (wep_key_added) {
3226 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3227 wlvif->ap.bcast_hlid);
3233 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming: routes an add/replace/remove request either to
 * the AP key path (record before AP start, set_ap_key after) or the STA
 * key path (set_sta_key), with several firmware-imposed special cases for
 * key removal documented inline.
 */
3237 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3238 u16 action, u8 id, u8 key_type,
3239 u8 key_size, const u8 *key, u32 tx_seq_32,
3240 u16 tx_seq_16, struct ieee80211_sta *sta)
3243 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3246 struct wl1271_station *wl_sta;
/* pairwise key -> the station's own link; group key -> broadcast link */
3250 wl_sta = (struct wl1271_station *)sta->drv_priv;
3251 hlid = wl_sta->hlid;
3253 hlid = wlvif->ap.bcast_hlid;
3256 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3258 * We do not support removing keys after AP shutdown.
3259 * Pretend we do to make mac80211 happy.
3261 if (action != KEY_ADD_OR_REPLACE)
/* AP not started yet - queue the key for wl1271_ap_init_hwenc() */
3264 ret = wl1271_record_ap_key(wl, wlvif, id,
3266 key, hlid, tx_seq_32,
3269 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3270 id, key_type, key_size,
3271 key, hlid, tx_seq_32,
3279 static const u8 bcast_addr[ETH_ALEN] = {
3280 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3283 addr = sta ? sta->addr : bcast_addr;
3285 if (is_zero_ether_addr(addr)) {
3286 /* We don't support TX-only encryption */
3290 /* The wl1271 does not allow removal of unicast keys - they
3291 will be cleared automatically on the next CMD_JOIN. Ignore the
3292 request silently, as we don't want mac80211 to emit
3293 an error message. */
3294 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3297 /* don't remove key if hlid was already deleted */
3298 if (action == KEY_REMOVE &&
3299 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3302 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3303 id, key_type, key_size,
3304 key, addr, tx_seq_32,
/*
 * mac80211 .set_key handler: thin wrapper around the hw-specific
 * wlcore_hw_set_key().  GEM/TKIP ciphers change the fw spare-block
 * accounting, so the tx queues are stopped and flushed around the
 * operation to keep the accounting in sync.
 */
3314 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3315 struct ieee80211_vif *vif,
3316 struct ieee80211_sta *sta,
3317 struct ieee80211_key_conf *key_conf)
3319 struct wl1271 *wl = hw->priv;
3321 bool might_change_spare =
3322 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3323 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3325 if (might_change_spare) {
3327 * stop the queues and flush to ensure the next packets are
3328 * in sync with FW spare block accounting
3330 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3331 wl1271_tx_flush(wl);
3334 mutex_lock(&wl->mutex);
3336 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3338 goto out_wake_queues;
3341 ret = wl1271_ps_elp_wakeup(wl);
3343 goto out_wake_queues;
3345 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3347 wl1271_ps_elp_sleep(wl);
/* re-enable tx even on error paths - queues were stopped above */
3350 if (might_change_spare)
3351 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3353 mutex_unlock(&wl->mutex);
/*
 * Common key-handling entry point shared by the hw-specific drivers:
 * resolves the target hlid, seeds the tx security sequence counters from
 * the link's freed-packet counter, maps the nl80211 cipher to a fw key
 * type, and dispatches SET/DISABLE to wl1271_set_key().  On STA vifs a
 * changed unicast/common key type also triggers an ARP-response template
 * rebuild.
 */
3358 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3359 struct ieee80211_vif *vif,
3360 struct ieee80211_sta *sta,
3361 struct ieee80211_key_conf *key_conf)
3363 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3370 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3372 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3373 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3374 key_conf->cipher, key_conf->keyidx,
3375 key_conf->keylen, key_conf->flags);
3376 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3378 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3380 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3381 hlid = wl_sta->hlid;
3383 hlid = wlvif->ap.bcast_hlid;
3386 hlid = wlvif->sta.hlid;
/* resume tx security counters from where the link left off */
3388 if (hlid != WL12XX_INVALID_LINK_ID) {
3389 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3390 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3391 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3394 switch (key_conf->cipher) {
3395 case WLAN_CIPHER_SUITE_WEP40:
3396 case WLAN_CIPHER_SUITE_WEP104:
3399 key_conf->hw_key_idx = key_conf->keyidx;
3401 case WLAN_CIPHER_SUITE_TKIP:
3402 key_type = KEY_TKIP;
3403 key_conf->hw_key_idx = key_conf->keyidx;
3405 case WLAN_CIPHER_SUITE_CCMP:
/* hw inserts the IV - ask mac80211 to leave room for it */
3407 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3409 case WL1271_CIPHER_SUITE_GEM:
3413 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3420 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3421 key_conf->keyidx, key_type,
3422 key_conf->keylen, key_conf->key,
3423 tx_seq_32, tx_seq_16, sta);
3425 wl1271_error("Could not add or replace key");
3430 * reconfiguring arp response if the unicast (or common)
3431 * encryption key type was changed
3433 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3434 (sta || key_type == KEY_WEP) &&
3435 wlvif->encryption_type != key_type) {
3436 wlvif->encryption_type = key_type;
3437 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3439 wl1271_warning("build arp rsp failed: %d", ret);
3446 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3447 key_conf->keyidx, key_type,
3448 key_conf->keylen, key_conf->key,
3451 wl1271_error("Could not remove key");
3457 wl1271_error("Unsupported key cmd 0x%x", cmd);
3463 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key handler: caches the default key index
 * and, for WEP, pushes the default key selection to the firmware.
 */
3465 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3466 struct ieee80211_vif *vif,
3469 struct wl1271 *wl = hw->priv;
3470 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3473 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3476 /* we don't handle unsetting of default key */
3480 mutex_lock(&wl->mutex);
3482 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3487 ret = wl1271_ps_elp_wakeup(wl);
3491 wlvif->default_key = key_idx;
3493 /* the default WEP key needs to be configured at least once */
3494 if (wlvif->encryption_type == KEY_WEP) {
3495 ret = wl12xx_cmd_set_default_wep_key(wl,
3503 wl1271_ps_elp_sleep(wl);
3506 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to the firmware, only on chips with
 * the REGDOMAIN_CONF quirk.  A failed command triggers a fw recovery.
 */
3509 void wlcore_regdomain_config(struct wl1271 *wl)
3513 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3516 mutex_lock(&wl->mutex);
3518 if (unlikely(wl->state != WLCORE_STATE_ON))
3521 ret = wl1271_ps_elp_wakeup(wl);
3525 ret = wlcore_cmd_regdomain_config_locked(wl);
/* fw rejected the regdomain update - recover to a known state */
3527 wl12xx_queue_recovery_work(wl);
3531 wl1271_ps_elp_sleep(wl);
3533 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan handler: starts a hw scan with the first requested
 * SSID.  Refused while any role holds a remain-on-channel, since scan and
 * ROC cannot coexist on this firmware.
 */
3536 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3537 struct ieee80211_vif *vif,
3538 struct cfg80211_scan_request *req)
3540 struct wl1271 *wl = hw->priv;
3545 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first SSID of the request is used by the fw scan */
3548 ssid = req->ssids[0].ssid;
3549 len = req->ssids[0].ssid_len;
3552 mutex_lock(&wl->mutex);
3554 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3556 * We cannot return -EBUSY here because cfg80211 will expect
3557 * a call to ieee80211_scan_completed if we do - in this case
3558 * there won't be any call.
3564 ret = wl1271_ps_elp_wakeup(wl);
3568 /* fail if there is any role in ROC */
3569 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3570 /* don't allow scanning right now */
3575 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3577 wl1271_ps_elp_sleep(wl);
3579 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan handler: stops an in-flight scan, resets the
 * scan state machine to IDLE and reports the scan as aborted to mac80211.
 * The scan-complete work is cancelled outside wl->mutex to avoid a
 * deadlock with the work itself taking the mutex.
 */
3584 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3585 struct ieee80211_vif *vif)
3587 struct wl1271 *wl = hw->priv;
3588 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3591 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3593 mutex_lock(&wl->mutex);
3595 if (unlikely(wl->state != WLCORE_STATE_ON))
/* no scan in progress - nothing to cancel */
3598 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3601 ret = wl1271_ps_elp_wakeup(wl);
3605 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3606 ret = wl->ops->scan_stop(wl, wlvif);
3612 * Rearm the tx watchdog just before idling scan. This
3613 * prevents just-finished scans from triggering the watchdog
3615 wl12xx_rearm_tx_watchdog_locked(wl);
3617 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3618 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3619 wl->scan_wlvif = NULL;
3620 wl->scan.req = NULL;
/* true == aborted, as required by the cfg80211 scan contract */
3621 ieee80211_scan_completed(wl->hw, true);
3624 wl1271_ps_elp_sleep(wl);
3626 mutex_unlock(&wl->mutex);
3628 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start handler: delegates to the chip-specific
 * sched_scan_start op and records which vif owns the scheduled scan.
 */
3631 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3632 struct ieee80211_vif *vif,
3633 struct cfg80211_sched_scan_request *req,
3634 struct ieee80211_sched_scan_ies *ies)
3636 struct wl1271 *wl = hw->priv;
3637 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3640 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3642 mutex_lock(&wl->mutex);
3644 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3649 ret = wl1271_ps_elp_wakeup(wl);
3653 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* remember the owner so idle/assoc transitions can stop the scan */
3657 wl->sched_vif = wlvif;
3660 wl1271_ps_elp_sleep(wl);
3662 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop handler: stops the scheduled scan through the
 * chip-specific op, under wl->mutex with ELP wakeup.
 */
3666 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3667 struct ieee80211_vif *vif)
3669 struct wl1271 *wl = hw->priv;
3670 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3673 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3675 mutex_lock(&wl->mutex);
3677 if (unlikely(wl->state != WLCORE_STATE_ON))
3680 ret = wl1271_ps_elp_wakeup(wl);
3684 wl->ops->sched_scan_stop(wl, wlvif);
3686 wl1271_ps_elp_sleep(wl);
3688 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold handler: pushes the new fragmentation
 * threshold to the firmware (global, not per-vif).
 */
3691 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3693 struct wl1271 *wl = hw->priv;
3696 mutex_lock(&wl->mutex);
3698 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3703 ret = wl1271_ps_elp_wakeup(wl);
3707 ret = wl1271_acx_frag_threshold(wl, value);
3709 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3711 wl1271_ps_elp_sleep(wl);
3714 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold handler: unlike the frag threshold, RTS is
 * programmed per vif, so the new value is applied to every active vif.
 */
3719 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3721 struct wl1271 *wl = hw->priv;
3722 struct wl12xx_vif *wlvif;
3725 mutex_lock(&wl->mutex);
3727 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3732 ret = wl1271_ps_elp_wakeup(wl);
3736 wl12xx_for_each_wlvif(wl, wlvif) {
3737 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3739 wl1271_warning("set rts threshold failed: %d", ret);
3741 wl1271_ps_elp_sleep(wl);
3744 mutex_unlock(&wl->mutex);
/*
 * Strip the first IE with id @eid from @skb (IEs start at @ieoffset) by
 * shifting the remaining bytes down and trimming the skb.
 * NOTE(review): the lines computing the IE length and the early return
 * when the IE is absent are elided in this view.
 */
3749 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3752 const u8 *next, *end = skb->data + skb->len;
3753 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3754 skb->len - ieoffset);
3759 memmove(ie, next, end - next);
3760 skb_trim(skb, skb->len - len);
/*
 * Vendor-specific counterpart of wl12xx_remove_ie(): strips the first
 * vendor IE matching @oui/@oui_type from @skb and trims the skb.
 */
3763 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3764 unsigned int oui, u8 oui_type,
3768 const u8 *next, *end = skb->data + skb->len;
3769 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3770 skb->data + ieoffset,
3771 skb->len - ieoffset);
3776 memmove(ie, next, end - next);
3777 skb_trim(skb, skb->len - len);
/*
 * Install the probe-response template for an AP vif from the frame built
 * by mac80211 (ieee80211_proberesp_get), and mark that usermode supplied
 * an explicit probe response so the beacon-derived one is not used.
 */
3780 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3781 struct ieee80211_vif *vif)
3783 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3784 struct sk_buff *skb;
3787 skb = ieee80211_proberesp_get(wl->hw, vif);
3791 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3792 CMD_TEMPL_AP_PROBE_RESPONSE,
3801 wl1271_debug(DEBUG_AP, "probe response updated");
3802 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template path: when the cached SSID is unset
 * (hidden-SSID beacons), rebuild the template on the stack with the real
 * SSID from bss_conf spliced in place of the (empty) SSID IE, then upload
 * it.  If the SSID is already correct the input frame is uploaded as-is.
 */
3808 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3809 struct ieee80211_vif *vif,
3811 size_t probe_rsp_len,
3814 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3815 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3816 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3817 int ssid_ie_offset, ie_offset, templ_len;
3820 /* no need to change probe response if the SSID is set correctly */
3821 if (wlvif->ssid_len > 0)
3822 return wl1271_cmd_template_set(wl, wlvif->role_id,
3823 CMD_TEMPL_AP_PROBE_RESPONSE,
/* bail out before overflowing the on-stack template buffer */
3828 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3829 wl1271_error("probe_rsp template too big");
3833 /* start searching from IE offset */
3834 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3836 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3837 probe_rsp_len - ie_offset);
3839 wl1271_error("No SSID in beacon!");
3843 ssid_ie_offset = ptr - probe_rsp_data;
/* skip past the original SSID IE (header + payload) */
3844 ptr += (ptr[1] + 2);
3846 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3848 /* insert SSID from bss_conf */
3849 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3850 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3851 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3852 bss_conf->ssid, bss_conf->ssid_len);
3853 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the IEs that followed the original SSID IE */
3855 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3856 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3857 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3859 return wl1271_cmd_template_set(wl, wlvif->role_id,
3860 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_info changes: slot time, preamble length and
 * CTS protection.  Failures are logged as warnings; see elided error
 * handling between the visible lines.
 */
3866 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3867 struct ieee80211_vif *vif,
3868 struct ieee80211_bss_conf *bss_conf,
3871 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3874 if (changed & BSS_CHANGED_ERP_SLOT) {
3875 if (bss_conf->use_short_slot)
3876 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3878 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3880 wl1271_warning("Set slot time failed %d", ret);
3885 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3886 if (bss_conf->use_short_preamble)
3887 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3889 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3892 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3893 if (bss_conf->use_cts_prot)
3894 ret = wl1271_acx_cts_protect(wl, wlvif,
3897 ret = wl1271_acx_cts_protect(wl, wlvif,
3898 CTSPROTECT_DISABLE);
3900 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Upload the beacon built by mac80211 as the beacon template and, unless
 * usermode already installed an explicit probe response, derive a
 * probe-response template from the same frame (strip TIM and P2P IEs,
 * rewrite the frame control to PROBE_RESP).  The beacon skb is consumed.
 */
3909 static int wlcore_set_beacon_template(struct wl1271 *wl,
3910 struct ieee80211_vif *vif,
3913 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3914 struct ieee80211_hdr *hdr;
3917 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3918 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3926 wl1271_debug(DEBUG_MASTER, "beacon updated");
3928 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3930 dev_kfree_skb(beacon);
3933 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3934 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3936 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3941 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the Microsoft WMM IE */
3945 wlvif->wmm_enabled =
3946 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3947 WLAN_OUI_TYPE_MICROSOFT_WMM,
3948 beacon->data + ieoffset,
3949 beacon->len - ieoffset);
3952 * In case we already have a probe-resp beacon set explicitly
3953 * by usermode, don't use the beacon data.
3955 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3958 /* remove TIM ie from probe response */
3959 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3962 * remove p2p ie from probe response.
3963 * the fw responds to probe requests that don't include
3964 * the p2p ie. probe requests with p2p ie will be passed,
3965 * and will be responded by the supplicant (the spec
3966 * forbids including the p2p ie when responding to probe
3967 * requests that didn't include it).
3969 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3970 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* repurpose the beacon frame as a probe response */
3972 hdr = (struct ieee80211_hdr *) beacon->data;
3973 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3974 IEEE80211_STYPE_PROBE_RESP);
3976 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3981 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3982 CMD_TEMPL_PROBE_RESPONSE,
3987 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_info changes: cache a new beacon interval,
 * refresh the AP probe-response template, and re-upload the beacon
 * template when the beacon content changed.
 */
3995 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3996 struct ieee80211_vif *vif,
3997 struct ieee80211_bss_conf *bss_conf,
4000 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4001 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4004 if (changed & BSS_CHANGED_BEACON_INT) {
4005 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4006 bss_conf->beacon_int);
4008 wlvif->beacon_int = bss_conf->beacon_int;
4011 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4012 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4014 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4017 if (changed & BSS_CHANGED_BEACON) {
4018 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4025 wl1271_error("beacon info change failed: %d", ret);
/* AP mode changes */
/*
 * AP-side .bss_info_changed handling: re-derive rate policies and
 * templates on basic-rate changes, start/stop the AP role when beaconing
 * is enabled/disabled, and propagate ERP and HT operation changes.
 */
4030 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4031 struct ieee80211_vif *vif,
4032 struct ieee80211_bss_conf *bss_conf,
4035 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4038 if (changed & BSS_CHANGED_BASIC_RATES) {
4039 u32 rates = bss_conf->basic_rates;
4041 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4043 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4044 wlvif->basic_rate_set);
4046 ret = wl1271_init_ap_rates(wl, wlvif);
4048 wl1271_error("AP rate policy change failed %d", ret);
/* rate change invalidates all templates - rebuild them */
4052 ret = wl1271_ap_init_templates(wl, vif);
4056 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4060 ret = wlcore_set_beacon_template(wl, vif, true);
4065 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4069 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4070 if (bss_conf->enable_beacon) {
4071 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4072 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* flush the keys recorded before the role existed */
4076 ret = wl1271_ap_init_hwenc(wl, wlvif);
4080 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4081 wl1271_debug(DEBUG_AP, "started AP");
4084 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4086 * AP might be in ROC in case we have just
4087 * sent auth reply. handle it.
4089 if (test_bit(wlvif->role_id, wl->roc_map))
4090 wl12xx_croc(wl, wlvif->role_id);
4092 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4096 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4097 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4099 wl1271_debug(DEBUG_AP, "stopped AP");
4104 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4108 /* Handle HT information change */
4109 if ((changed & BSS_CHANGED_HT) &&
4110 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4111 ret = wl1271_acx_set_ht_information(wl, wlvif,
4112 bss_conf->ht_operation_mode);
4114 wl1271_warning("Set ht information failed %d", ret);
/*
 * Apply a new BSSID on a STA vif: cache beacon interval and rate sets,
 * stop any scheduled scan owned by this vif, push the rate policies and
 * null-data templates, refresh the SSID and mark the vif in use.
 */
4123 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4124 struct ieee80211_bss_conf *bss_conf,
4130 wl1271_debug(DEBUG_MAC80211,
4131 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4132 bss_conf->bssid, bss_conf->aid,
4133 bss_conf->beacon_int,
4134 bss_conf->basic_rates, sta_rate_set);
4136 wlvif->beacon_int = bss_conf->beacon_int;
4137 rates = bss_conf->basic_rates;
4138 wlvif->basic_rate_set =
4139 wl1271_tx_enabled_rates_get(wl, rates,
4142 wl1271_tx_min_rate_get(wl,
4143 wlvif->basic_rate_set);
4147 wl1271_tx_enabled_rates_get(wl,
4151 /* we only support sched_scan while not connected */
4152 if (wl->sched_vif == wlvif)
4153 wl->ops->sched_scan_stop(wl, wlvif);
4155 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4159 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4163 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4167 wlcore_set_ssid(wl, wlvif);
4169 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, stop the
 * STA role if it was running, and clear the in-use flag.
 */
4174 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4178 /* revert back to minimum rates for the current band */
4179 wl1271_set_band_rate(wl, wlvif);
4180 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4182 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4186 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4187 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4188 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4193 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4196 /* STA/IBSS mode changes */
/*
 * Apply mac80211 BSS configuration changes for a STA or IBSS vif:
 * IBSS join/leave, CQM RSSI triggers, BSSID changes, (dis)association,
 * power-save entry/exit, HT peer capabilities and ARP filtering.
 * Runs under wl->mutex with the chip awake (dispatched from
 * wl1271_op_bss_info_changed below).
 * NOTE(review): several original lines are elided in this excerpt;
 * comments only describe what the visible code establishes.
 */
4197 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4198 struct ieee80211_vif *vif,
4199 struct ieee80211_bss_conf *bss_conf,
4202 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4203 bool do_join = false;
4204 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4205 bool ibss_joined = false;
4206 u32 sta_rate_set = 0;
4208 struct ieee80211_sta *sta;
4209 bool sta_exists = false;
4210 struct ieee80211_sta_ht_cap sta_ht_cap;
4213 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4219 if (changed & BSS_CHANGED_IBSS) {
4220 if (bss_conf->ibss_joined) {
4221 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: drop association state and stop the STA role */
4224 wlcore_unset_assoc(wl, wlvif);
4225 wl12xx_cmd_role_stop_sta(wl, wlvif);
4229 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4232 /* Need to update the SSID (for filtering etc) */
4233 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4236 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4237 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4238 bss_conf->enable_beacon ? "enabled" : "disabled");
4243 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4244 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4246 if (changed & BSS_CHANGED_CQM) {
4247 bool enable = false;
/* a zero RSSI threshold leaves the CQM trigger disabled */
4248 if (bss_conf->cqm_rssi_thold)
4250 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4251 bss_conf->cqm_rssi_thold,
4252 bss_conf->cqm_rssi_hyst);
4255 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4258 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4259 BSS_CHANGED_ASSOC)) {
4261 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4263 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4265 /* save the supp_rates of the ap */
4266 sta_rate_set = sta->supp_rates[wlvif->band];
4267 if (sta->ht_cap.ht_supported)
/* fold the HT and MIMO MCS rx masks into the rate set */
4269 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4270 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4271 sta_ht_cap = sta->ht_cap;
4278 if (changed & BSS_CHANGED_BSSID) {
4279 if (!is_zero_ether_addr(bss_conf->bssid)) {
4280 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4285 /* Need to update the BSSID (for filtering etc) */
4288 ret = wlcore_clear_bssid(wl, wlvif);
4294 if (changed & BSS_CHANGED_IBSS) {
4295 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4296 bss_conf->ibss_joined);
4298 if (bss_conf->ibss_joined) {
4299 u32 rates = bss_conf->basic_rates;
4300 wlvif->basic_rate_set =
4301 wl1271_tx_enabled_rates_get(wl, rates,
4304 wl1271_tx_min_rate_get(wl,
4305 wlvif->basic_rate_set);
4307 /* by default, use 11b + OFDM rates */
4308 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4309 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4315 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4320 ret = wlcore_join(wl, wlvif);
4322 wl1271_warning("cmd join failed %d", ret);
4327 if (changed & BSS_CHANGED_ASSOC) {
4328 if (bss_conf->assoc) {
4329 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* re-send peer state if mac80211 authorized us earlier */
4334 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4335 wl12xx_set_authorized(wl, wlvif);
4337 wlcore_unset_assoc(wl, wlvif);
4341 if (changed & BSS_CHANGED_PS) {
4342 if ((bss_conf->ps) &&
4343 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4344 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* forced_ps config overrides mac80211's auto power-save */
4348 if (wl->conf.conn.forced_ps) {
4349 ps_mode = STATION_POWER_SAVE_MODE;
4350 ps_mode_str = "forced";
4352 ps_mode = STATION_AUTO_PS_MODE;
4353 ps_mode_str = "auto";
4356 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4358 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4360 wl1271_warning("enter %s ps failed %d",
4362 } else if (!bss_conf->ps &&
4363 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4364 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4366 ret = wl1271_ps_set_mode(wl, wlvif,
4367 STATION_ACTIVE_MODE);
4369 wl1271_warning("exit auto ps failed %d", ret);
4373 /* Handle new association with HT. Do this after join. */
4376 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4378 ret = wlcore_hw_set_peer_cap(wl,
4384 wl1271_warning("Set ht cap failed %d", ret);
4390 ret = wl1271_acx_set_ht_information(wl, wlvif,
4391 bss_conf->ht_operation_mode);
4393 wl1271_warning("Set ht information failed %d",
4400 /* Handle arp filtering. Done after join. */
4401 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4402 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4403 __be32 addr = bss_conf->arp_addr_list[0];
4404 wlvif->sta.qos = bss_conf->qos;
4405 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4407 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4408 wlvif->ip_addr = addr;
4410 * The template should have been configured only upon
4411 * association. however, it seems that the correct ip
4412 * isn't being set (when sending), so we have to
4413 * reconfigure the template upon every ip change.
4415 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4417 wl1271_warning("build arp rsp failed: %d", ret);
4421 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4422 (ACX_ARP_FILTER_ARP_FILTERING |
4423 ACX_ARP_FILTER_AUTO_ARP),
4427 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback.  Cancels the pending
 * connection-loss work on STA association changes, flushes TX before an
 * AP vif disables beaconing, then, under wl->mutex with the chip awake,
 * applies TX-power changes and dispatches to the AP or STA/IBSS handler.
 */
4438 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4439 struct ieee80211_vif *vif,
4440 struct ieee80211_bss_conf *bss_conf,
4443 struct wl1271 *wl = hw->priv;
4444 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4445 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4448 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4449 wlvif->role_id, (int)changed);
4452 * make sure to cancel pending disconnections if our association
4455 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4456 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush queued frames before beacons go away on an AP vif */
4458 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4459 !bss_conf->enable_beacon)
4460 wl1271_tx_flush(wl);
4462 mutex_lock(&wl->mutex);
4464 if (unlikely(wl->state != WLCORE_STATE_ON))
4467 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4470 ret = wl1271_ps_elp_wakeup(wl);
4474 if ((changed & BSS_CHANGED_TXPOWER) &&
4475 bss_conf->txpower != wlvif->power_level) {
4477 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4481 wlvif->power_level = bss_conf->txpower;
4485 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4487 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4489 wl1271_ps_elp_sleep(wl);
4492 mutex_unlock(&wl->mutex);
/* mac80211 add_chanctx callback: nothing to program here, just log it. */
4495 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4496 struct ieee80211_chanctx_conf *ctx)
4498 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4499 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4500 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 remove_chanctx callback: nothing to tear down, just log it. */
4504 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4505 struct ieee80211_chanctx_conf *ctx)
4507 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4508 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4509 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 change_chanctx callback: log the change; no firmware update. */
4512 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4513 struct ieee80211_chanctx_conf *ctx,
4516 wl1271_debug(DEBUG_MAC80211,
4517 "mac80211 change chanctx %d (type %d) changed 0x%x",
4518 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4519 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 assign_vif_chanctx callback: record the context's band,
 * channel and channel type on the vif and refresh the vif's default
 * rate set for the new band, all under wl->mutex.
 */
4522 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4523 struct ieee80211_vif *vif,
4524 struct ieee80211_chanctx_conf *ctx)
4526 struct wl1271 *wl = hw->priv;
4527 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4528 int channel = ieee80211_frequency_to_channel(
4529 ctx->def.chan->center_freq);
4531 wl1271_debug(DEBUG_MAC80211,
4532 "mac80211 assign chanctx (role %d) %d (type %d)",
4533 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4535 mutex_lock(&wl->mutex);
4537 wlvif->band = ctx->def.chan->band;
4538 wlvif->channel = channel;
4539 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4541 /* update default rates according to the band */
4542 wl1271_set_band_rate(wl, wlvif);
4544 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: log the unassignment and
 * flush any pending TX for this channel context.
 */
4549 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4550 struct ieee80211_vif *vif,
4551 struct ieee80211_chanctx_conf *ctx)
4553 struct wl1271 *wl = hw->priv;
4554 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4556 wl1271_debug(DEBUG_MAC80211,
4557 "mac80211 unassign chanctx (role %d) %d (type %d)",
4559 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4560 cfg80211_get_chandef_type(&ctx->def));
4562 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program the AC parameters (cw_min/cw_max,
 * AIFS, TXOP) and the TID configuration for one hardware queue via
 * ACX commands, choosing UPSD or legacy PS scheme for the TID.
 */
4565 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4566 struct ieee80211_vif *vif, u16 queue,
4567 const struct ieee80211_tx_queue_params *params)
4569 struct wl1271 *wl = hw->priv;
4570 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4574 mutex_lock(&wl->mutex);
4576 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4579 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4581 ps_scheme = CONF_PS_SCHEME_LEGACY;
4583 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4586 ret = wl1271_ps_elp_wakeup(wl);
4591 * the txop is confed in units of 32us by the mac80211,
4594 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4595 params->cw_min, params->cw_max,
/* << 5 converts mac80211's 32us TXOP units to microseconds */
4596 params->aifs, params->txop << 5);
4600 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4601 CONF_CHANNEL_TYPE_EDCF,
4602 wl1271_tx_get_queue(queue),
4603 ps_scheme, CONF_ACK_POLICY_LEGACY,
4607 wl1271_ps_elp_sleep(wl);
4610 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the TSF from firmware via an ACX
 * query.  mactime stays ULLONG_MAX if the chip is off or the query
 * cannot run.
 */
4615 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4616 struct ieee80211_vif *vif)
4619 struct wl1271 *wl = hw->priv;
4620 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4621 u64 mactime = ULLONG_MAX;
4624 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4626 mutex_lock(&wl->mutex);
4628 if (unlikely(wl->state != WLCORE_STATE_ON))
4631 ret = wl1271_ps_elp_wakeup(wl);
4635 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4640 wl1271_ps_elp_sleep(wl);
4643 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the current configured
 * channel; no noise/usage statistics are provided.
 */
4647 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4648 struct survey_info *survey)
4650 struct ieee80211_conf *conf = &hw->conf;
4655 survey->channel = conf->chandef.chan;
/*
 * Allocate a firmware link (HLID) for a new AP-mode station.  Fails
 * when the AP station limit or the link table is exhausted.  Restores
 * the station's saved TX security sequence counter so encryption
 * survives recovery/resume.
 */
4660 static int wl1271_allocate_sta(struct wl1271 *wl,
4661 struct wl12xx_vif *wlvif,
4662 struct ieee80211_sta *sta)
4664 struct wl1271_station *wl_sta;
4668 if (wl->active_sta_count >= wl->max_ap_stations) {
4669 wl1271_warning("could not allocate HLID - too much stations");
4673 wl_sta = (struct wl1271_station *)sta->drv_priv;
4674 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4676 wl1271_warning("could not allocate HLID - too many links");
4680 /* use the previous security seq, if this is a recovery/resume */
4681 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4683 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4684 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4685 wl->active_sta_count++;
/*
 * Release the firmware link of an AP-mode station: clear its HLID from
 * the per-vif map and the AP PS bitmaps, preserve the last security
 * sequence counter in the station's drv_priv for recovery/suspend,
 * free the link, and rearm the TX watchdog once no stations remain.
 */
4689 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4691 struct wl1271_station *wl_sta;
4692 struct ieee80211_sta *sta;
4693 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4695 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4698 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4699 __clear_bit(hlid, &wl->ap_ps_map);
4700 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4703 * save the last used PN in the private part of iee80211_sta,
4704 * in case of recovery/suspend
4707 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4709 wl_sta = (void *)sta->drv_priv;
4710 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4713 * increment the initial seq number on recovery to account for
4714 * transmitted packets that we haven't yet got in the FW status
4716 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4717 wl_sta->total_freed_pkts +=
4718 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4722 wl12xx_free_link(wl, wlvif, &hlid);
4723 wl->active_sta_count--;
4726 * rearm the tx watchdog when the last STA is freed - give the FW a
4727 * chance to return STA-buffered packets before complaining.
4729 if (wl->active_sta_count == 0)
4730 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode station: allocate an HLID and tell the firmware about
 * the new peer.  On add_peer failure the HLID is released again.
 */
4733 static int wl12xx_sta_add(struct wl1271 *wl,
4734 struct wl12xx_vif *wlvif,
4735 struct ieee80211_sta *sta)
4737 struct wl1271_station *wl_sta;
4741 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4743 ret = wl1271_allocate_sta(wl, wlvif, sta);
4747 wl_sta = (struct wl1271_station *)sta->drv_priv;
4748 hlid = wl_sta->hlid;
4750 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation if the firmware rejected the peer */
4752 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode station: send remove_peer to the firmware and free
 * the station's link.
 */
4757 static int wl12xx_sta_remove(struct wl1271 *wl,
4758 struct wl12xx_vif *wlvif,
4759 struct ieee80211_sta *sta)
4761 struct wl1271_station *wl_sta;
4764 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4766 wl_sta = (struct wl1271_station *)sta->drv_priv;
/* NOTE(review): `id` is set on an elided line — presumably wl_sta->hlid */
4768 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4771 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4775 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no role in
 * wl->roc_map is already ROCing and the vif has a valid role id.
 */
4779 static void wlcore_roc_if_possible(struct wl1271 *wl,
4780 struct wl12xx_vif *wlvif)
4782 if (find_first_bit(wl->roc_map,
4783 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4786 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4789 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection (or a pending auth reply when
 * wl_sta is NULL) and keep a ROC active on the vif's role for as long
 * as any such connection is in progress; drop the ROC once the counter
 * and the pending-auth flag both clear.
 */
4793 * when wl_sta is NULL, we treat this call as if coming from a
4794 * pending auth reply.
4795 * wl->mutex must be taken and the FW must be awake when the call
4798 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4799 struct wl1271_station *wl_sta, bool in_conn)
4802 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-progress connection: grab the ROC if nobody has it */
4805 if (!wlvif->ap_pending_auth_reply &&
4806 !wlvif->inconn_count)
4807 wlcore_roc_if_possible(wl, wlvif);
4810 wl_sta->in_connection = true;
4811 wlvif->inconn_count++;
4813 wlvif->ap_pending_auth_reply = true;
4816 if (wl_sta && !wl_sta->in_connection)
4819 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4822 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4826 wl_sta->in_connection = false;
4827 wlvif->inconn_count--;
4829 wlvif->ap_pending_auth_reply = false;
/* last connection finished: cancel the ROC held by this role */
4832 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4833 test_bit(wlvif->role_id, wl->roc_map))
4834 wl12xx_croc(wl, wlvif->role_id);
/*
 * Translate a mac80211 station state transition into firmware commands,
 * for both AP mode (add/remove/authorize peers, in-connection ROC
 * tracking) and STA mode (peer-state/authorization, connection ROC).
 * NOTE(review): the mode conditions (is_ap/is_sta) on several branches
 * sit on elided lines; comments reflect the visible transitions only.
 */
4838 static int wl12xx_update_sta_state(struct wl1271 *wl,
4839 struct wl12xx_vif *wlvif,
4840 struct ieee80211_sta *sta,
4841 enum ieee80211_sta_state old_state,
4842 enum ieee80211_sta_state new_state)
4844 struct wl1271_station *wl_sta;
4845 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4846 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4849 wl_sta = (struct wl1271_station *)sta->drv_priv;
4851 /* Add station (AP mode) */
4853 old_state == IEEE80211_STA_NOTEXIST &&
4854 new_state == IEEE80211_STA_NONE) {
4855 ret = wl12xx_sta_add(wl, wlvif, sta);
4859 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4862 /* Remove station (AP mode) */
4864 old_state == IEEE80211_STA_NONE &&
4865 new_state == IEEE80211_STA_NOTEXIST) {
4867 wl12xx_sta_remove(wl, wlvif, sta);
4869 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4872 /* Authorize station (AP mode) */
4874 new_state == IEEE80211_STA_AUTHORIZED) {
4875 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4879 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4884 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4887 /* Authorize station */
4889 new_state == IEEE80211_STA_AUTHORIZED) {
4890 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4891 ret = wl12xx_set_authorized(wl, wlvif);
/* Deauthorize: roll back the authorized/state-sent flags */
4897 old_state == IEEE80211_STA_AUTHORIZED &&
4898 new_state == IEEE80211_STA_ASSOC) {
4899 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4900 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4903 /* clear ROCs on failure or authorization */
4905 (new_state == IEEE80211_STA_AUTHORIZED ||
4906 new_state == IEEE80211_STA_NOTEXIST)) {
4907 if (test_bit(wlvif->role_id, wl->roc_map))
4908 wl12xx_croc(wl, wlvif->role_id);
/* new connection attempt: ROC on our role if none is active */
4912 old_state == IEEE80211_STA_NOTEXIST &&
4913 new_state == IEEE80211_STA_NONE) {
4914 if (find_first_bit(wl->roc_map,
4915 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4916 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4917 wl12xx_roc(wl, wlvif, wlvif->role_id,
4918 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: wake the chip under wl->mutex and apply
 * the state transition via wl12xx_update_sta_state.  Errors on
 * downgrade transitions (new_state < old_state) are handled after the
 * unlock (tail elided in this excerpt).
 */
4924 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4925 struct ieee80211_vif *vif,
4926 struct ieee80211_sta *sta,
4927 enum ieee80211_sta_state old_state,
4928 enum ieee80211_sta_state new_state)
4930 struct wl1271 *wl = hw->priv;
4931 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4934 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4935 sta->aid, old_state, new_state);
4937 mutex_lock(&wl->mutex);
4939 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4944 ret = wl1271_ps_elp_wakeup(wl);
4948 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4950 wl1271_ps_elp_sleep(wl);
4952 mutex_unlock(&wl->mutex);
4953 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in
 * firmware (start/stop via ACX, bounded by ba_rx_session_count_max and
 * tracked per-link in ba_bitmap).  TX AMPDU sessions are managed by the
 * firmware itself, so all TX actions fall through without driver work.
 */
4958 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4959 struct ieee80211_vif *vif,
4960 enum ieee80211_ampdu_mlme_action action,
4961 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4964 struct wl1271 *wl = hw->priv;
4965 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4967 u8 hlid, *ba_bitmap;
4969 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4972 /* sanity check - the fields in FW are only 8bits wide */
4973 if (WARN_ON(tid > 0xFF))
4976 mutex_lock(&wl->mutex);
4978 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the firmware link for this session: own link in STA
 * mode, the peer's link in AP mode */
4983 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4984 hlid = wlvif->sta.hlid;
4985 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4986 struct wl1271_station *wl_sta;
4988 wl_sta = (struct wl1271_station *)sta->drv_priv;
4989 hlid = wl_sta->hlid;
4995 ba_bitmap = &wl->links[hlid].ba_bitmap;
4997 ret = wl1271_ps_elp_wakeup(wl);
5001 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5005 case IEEE80211_AMPDU_RX_START:
5006 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5011 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5013 wl1271_error("exceeded max RX BA sessions");
5017 if (*ba_bitmap & BIT(tid)) {
5019 wl1271_error("cannot enable RX BA session on active "
5024 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5027 *ba_bitmap |= BIT(tid);
5028 wl->ba_rx_session_count++;
5032 case IEEE80211_AMPDU_RX_STOP:
5033 if (!(*ba_bitmap & BIT(tid))) {
5035 * this happens on reconfig - so only output a debug
5036 * message for now, and don't fail the function.
5038 wl1271_debug(DEBUG_MAC80211,
5039 "no active RX BA session on tid: %d",
5045 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5048 *ba_bitmap &= ~BIT(tid);
5049 wl->ba_rx_session_count--;
5054 * The BA initiator session management in FW independently.
5055 * Falling break here on purpose for all TX APDU commands.
5057 case IEEE80211_AMPDU_TX_START:
5058 case IEEE80211_AMPDU_TX_STOP_CONT:
5059 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5060 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5061 case IEEE80211_AMPDU_TX_OPERATIONAL:
5066 wl1271_error("Incorrect ampdu action id=%x\n", action);
5070 wl1271_ps_elp_sleep(wl);
5073 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: store the per-band legacy rate
 * masks on the vif; for an unassociated STA also push the new default
 * rate policies to firmware immediately.
 */
5078 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5079 struct ieee80211_vif *vif,
5080 const struct cfg80211_bitrate_mask *mask)
5082 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5083 struct wl1271 *wl = hw->priv;
5086 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5087 mask->control[NL80211_BAND_2GHZ].legacy,
5088 mask->control[NL80211_BAND_5GHZ].legacy);
5090 mutex_lock(&wl->mutex);
5092 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5093 wlvif->bitrate_masks[i] =
5094 wl1271_tx_enabled_rates_get(wl,
5095 mask->control[i].legacy,
5098 if (unlikely(wl->state != WLCORE_STATE_ON))
/* only an idle (unassociated) STA gets its rates reprogrammed now */
5101 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5102 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5104 ret = wl1271_ps_elp_wakeup(wl);
5108 wl1271_set_band_rate(wl, wlvif);
5110 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5111 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5113 wl1271_ps_elp_sleep(wl);
5116 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback: flush TX, then ask the hardware ops
 * to perform the switch for every STA vif.  If the chip is off, report
 * the switch as failed to mac80211.  A delayed work is armed to flag
 * failure if the switch hasn't completed 5s after the switch time.
 */
5121 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5122 struct ieee80211_channel_switch *ch_switch)
5124 struct wl1271 *wl = hw->priv;
5125 struct wl12xx_vif *wlvif;
5128 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5130 wl1271_tx_flush(wl);
5132 mutex_lock(&wl->mutex);
5134 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5135 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5136 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5137 ieee80211_chswitch_done(vif, false);
5140 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5144 ret = wl1271_ps_elp_wakeup(wl);
5148 /* TODO: change mac80211 to pass vif as param */
5149 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5150 unsigned long delay_usec;
5152 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5156 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5158 /* indicate failure 5 seconds after channel switch time */
5159 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5161 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5162 usecs_to_jiffies(delay_usec) +
5163 msecs_to_jiffies(5000));
5167 wl1271_ps_elp_sleep(wl);
5170 mutex_unlock(&wl->mutex);
/* mac80211 flush callback: drain all queued TX frames (queues/drop unused). */
5173 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5175 struct wl1271 *wl = hw->priv;
5177 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and arm roc_complete_work to end the ROC after the
 * requested duration.  Only one ROC can be active at a time — returns
 * busy if a ROC vif or any ROCing role already exists.
 */
5180 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5181 struct ieee80211_vif *vif,
5182 struct ieee80211_channel *chan,
5184 enum ieee80211_roc_type type)
5186 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5187 struct wl1271 *wl = hw->priv;
5188 int channel, ret = 0;
5190 channel = ieee80211_frequency_to_channel(chan->center_freq);
5192 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5193 channel, wlvif->role_id);
5195 mutex_lock(&wl->mutex);
5197 if (unlikely(wl->state != WLCORE_STATE_ON))
5200 /* return EBUSY if we can't ROC right now */
5201 if (WARN_ON(wl->roc_vif ||
5202 find_first_bit(wl->roc_map,
5203 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5208 ret = wl1271_ps_elp_wakeup(wl);
5212 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5217 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5218 msecs_to_jiffies(duration));
5220 wl1271_ps_elp_sleep(wl);
5222 mutex_unlock(&wl->mutex);
/*
 * Finish the active ROC: stop the device role of the ROC vif.
 * No-op when no ROC is active or the vif is no longer initialized.
 * Caller holds wl->mutex with the chip awake.
 */
5226 static int __wlcore_roc_completed(struct wl1271 *wl)
5228 struct wl12xx_vif *wlvif;
5231 /* already completed */
5232 if (unlikely(!wl->roc_vif))
5235 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5237 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5240 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed: take wl->mutex, wake
 * the chip, complete the ROC, then sleep and unlock.
 */
5249 static int wlcore_roc_completed(struct wl1271 *wl)
5253 wl1271_debug(DEBUG_MAC80211, "roc complete");
5255 mutex_lock(&wl->mutex);
5257 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5262 ret = wl1271_ps_elp_wakeup(wl);
5266 ret = __wlcore_roc_completed(wl);
5268 wl1271_ps_elp_sleep(wl);
5270 mutex_unlock(&wl->mutex);
/*
 * Delayed work armed by remain_on_channel: complete the ROC and notify
 * mac80211 that the ROC period expired.
 */
5275 static void wlcore_roc_complete_work(struct work_struct *work)
5277 struct delayed_work *dwork;
5281 dwork = container_of(work, struct delayed_work, work);
5282 wl = container_of(dwork, struct wl1271, roc_complete_work);
5284 ret = wlcore_roc_completed(wl);
5286 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending completion work (sync — see the deadlock note) and complete
 * the ROC immediately.
 */
5289 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5291 struct wl1271 *wl = hw->priv;
5293 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5296 wl1271_tx_flush(wl);
5299 * we can't just flush_work here, because it might deadlock
5300 * (as we might get called from the same workqueue)
5302 cancel_delayed_work_sync(&wl->roc_complete_work);
5303 wlcore_roc_completed(wl);
/* mac80211 sta_rc_update callback: forward to the chip-specific hw op. */
5308 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5309 struct ieee80211_vif *vif,
5310 struct ieee80211_sta *sta,
5313 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5314 struct wl1271 *wl = hw->priv;
5316 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 get_rssi callback: read the average RSSI from firmware via
 * ACX, under wl->mutex with the chip awake.
 */
5319 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5320 struct ieee80211_vif *vif,
5321 struct ieee80211_sta *sta,
5324 struct wl1271 *wl = hw->priv;
5325 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5328 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5330 mutex_lock(&wl->mutex);
5332 if (unlikely(wl->state != WLCORE_STATE_ON))
5335 ret = wl1271_ps_elp_wakeup(wl);
5339 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5344 wl1271_ps_elp_sleep(wl);
5347 mutex_unlock(&wl->mutex);
/*
 * mac80211 tx_frames_pending callback: true while frames are queued in
 * the driver or still held by the firmware.
 */
5352 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5354 struct wl1271 *wl = hw->priv;
5357 mutex_lock(&wl->mutex);
5359 if (unlikely(wl->state != WLCORE_STATE_ON))
5362 /* packets are considered pending if in the TX queue or the FW */
5363 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5365 mutex_unlock(&wl->mutex);
5370 /* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table; hw_value(_short) carries the firmware's
 * CONF_HW_BIT_RATE_* bit for each rate */
5371 static struct ieee80211_rate wl1271_rates[] = {
5373 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5374 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5376 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5377 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5378 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5380 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5381 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5382 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5384 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5385 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5386 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5388 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5389 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5391 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5392 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5394 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5395 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5397 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5398 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5400 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5401 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5403 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5404 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5406 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5407 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5409 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5410 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5413 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14 */
5414 static struct ieee80211_channel wl1271_channels[] = {
5415 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5416 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5417 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5418 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5419 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5420 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5421 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5422 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5423 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5424 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5425 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5426 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5427 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5428 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5431 /* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor tying the channel and rate tables together */
5432 static struct ieee80211_supported_band wl1271_band_2ghz = {
5433 .channels = wl1271_channels,
5434 .n_channels = ARRAY_SIZE(wl1271_channels),
5435 .bitrates = wl1271_rates,
5436 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5439 /* 5 GHz data rates for WL1273 */
/* OFDM-only (no 11b) — same CONF_HW_BIT_RATE_* encoding as the
 * 2.4 GHz table above */
5440 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5442 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5443 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5445 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5446 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5448 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5449 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5451 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5452 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5454 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5455 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5457 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5458 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5460 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5461 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5463 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5464 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5467 /* 5 GHz band channels for WL1273 */
5468 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5469 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5470 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5471 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5472 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5473 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5474 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5475 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5476 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5477 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5478 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5479 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5480 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5481 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5482 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5483 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5484 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5485 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5486 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5487 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5488 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5489 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5490 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5491 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5492 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5493 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5494 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5495 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5496 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5497 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5498 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5499 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor tying the channel and rate tables together */
5502 static struct ieee80211_supported_band wl1271_band_5ghz = {
5503 .channels = wl1271_channels_5ghz,
5504 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5505 .bitrates = wl1271_rates_5ghz,
5506 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table registered with ieee80211_register_hw() */
5509 static const struct ieee80211_ops wl1271_ops = {
5510 .start = wl1271_op_start,
5511 .stop = wlcore_op_stop,
5512 .add_interface = wl1271_op_add_interface,
5513 .remove_interface = wl1271_op_remove_interface,
5514 .change_interface = wl12xx_op_change_interface,
5516 .suspend = wl1271_op_suspend,
5517 .resume = wl1271_op_resume,
5519 .config = wl1271_op_config,
5520 .prepare_multicast = wl1271_op_prepare_multicast,
5521 .configure_filter = wl1271_op_configure_filter,
5523 .set_key = wlcore_op_set_key,
5524 .hw_scan = wl1271_op_hw_scan,
5525 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5526 .sched_scan_start = wl1271_op_sched_scan_start,
5527 .sched_scan_stop = wl1271_op_sched_scan_stop,
5528 .bss_info_changed = wl1271_op_bss_info_changed,
5529 .set_frag_threshold = wl1271_op_set_frag_threshold,
5530 .set_rts_threshold = wl1271_op_set_rts_threshold,
5531 .conf_tx = wl1271_op_conf_tx,
5532 .get_tsf = wl1271_op_get_tsf,
5533 .get_survey = wl1271_op_get_survey,
5534 .sta_state = wl12xx_op_sta_state,
5535 .ampdu_action = wl1271_op_ampdu_action,
5536 .tx_frames_pending = wl1271_tx_frames_pending,
5537 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5538 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5539 .channel_switch = wl12xx_op_channel_switch,
5540 .flush = wlcore_op_flush,
5541 .remain_on_channel = wlcore_op_remain_on_channel,
5542 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5543 .add_chanctx = wlcore_op_add_chanctx,
5544 .remove_chanctx = wlcore_op_remove_chanctx,
5545 .change_chanctx = wlcore_op_change_chanctx,
5546 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5547 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5548 .sta_rc_update = wlcore_op_sta_rc_update,
5549 .get_rssi = wlcore_op_get_rssi,
5550 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate code to a mac80211 rate index via the per-band
 * lookup table, logging illegal or unsupported codes from the HW.
 */
5554 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5560 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5561 wl1271_error("Illegal RX rate from HW: %d", rate);
5565 idx = wl->band_rate_to_idx[band][rate];
5566 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5567 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Build the wiphy's MAC address list from an OUI + NIC base: addresses
 * are consecutive in the NIC part.  If the chip supplies fewer than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is filled with a copy of the
 * first address with the locally-administered (LAA) bit set.
 */
5574 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5578 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5581 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5582 wl1271_warning("NIC part of the MAC address wraps around!");
5584 for (i = 0; i < wl->num_mac_addr; i++) {
5585 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5586 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5587 wl->addresses[i].addr[2] = (u8) oui;
5588 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5589 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5590 wl->addresses[i].addr[5] = (u8) nic;
5594 /* we may be one address short at the most */
5595 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5598 * turn on the LAA bit in the first address and use it as
5601 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5602 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5603 memcpy(&wl->addresses[idx], &wl->addresses[0],
5604 sizeof(wl->addresses[0]));
/* BIT(1) in the first OUI octet marks a locally administered address */
5606 wl->addresses[idx].addr[2] |= BIT(1);
5609 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5610 wl->hw->wiphy->addresses = wl->addresses;
/*
 * wl12xx_get_hw_info - power the chip on, read the chip ID, PG version
 * and (optionally) the fused MAC address, then power it off again.
 *
 * NOTE(review): the error checks and final return are elided in this
 * excerpt; only the visible steps are annotated.
 */
5613 static int wl12xx_get_hw_info(struct wl1271 *wl)
5617 ret = wl12xx_set_power_on(wl)
5621 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* Default to "no fused address"; ops->get_mac may fill these in below. */
5625 wl->fuse_oui_addr = 0;
5626 wl->fuse_nic_addr = 0;
5628 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional per chip family. */
5632 if (wl->ops->get_mac)
5633 ret = wl->ops->get_mac(wl);
5636 wl1271_power_off(wl);
/*
 * wl1271_register_hw - derive the device MAC address list (NVS first,
 * chip fuse as fallback) and register with mac80211.  Idempotent:
 * returns early when already registered.
 */
5640 static int wl1271_register_hw(struct wl1271 *wl)
5643 u32 oui_addr = 0, nic_addr = 0;
5645 if (wl->mac80211_registered)
/* NVS must be large enough to hold the MAC address bytes we index. */
5648 if (wl->nvs_len >= 12) {
5649 /* NOTE: The wl->nvs->nvs element must be first, in
5650 * order to simplify the casting, we assume it is at
5651 * the beginning of the wl->nvs structure.
5653 u8 *nvs_ptr = (u8 *)wl->nvs;
/* The MAC is scattered in the NVS: OUI from bytes 11/10/6, NIC from 5/4/3. */
5656 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5658 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5661 /* if the MAC address is zeroed in the NVS derive from fuse */
5662 if (oui_addr == 0 && nic_addr == 0) {
5663 oui_addr = wl->fuse_oui_addr;
5664 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5665 nic_addr = wl->fuse_nic_addr + 1;
5668 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5670 ret = ieee80211_register_hw(wl->hw);
5672 wl1271_error("unable to register mac80211 hw: %d", ret);
5676 wl->mac80211_registered = true;
5678 wl1271_debugfs_init(wl);
5680 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - tear down the mac80211 registration.  Stops
 * PLT (production line test) mode first; the guard condition around
 * wl1271_plt_stop() is elided in this excerpt.
 */
5686 static void wl1271_unregister_hw(struct wl1271 *wl)
5689 wl1271_plt_stop(wl);
5691 ieee80211_unregister_hw(wl->hw);
5692 wl->mac80211_registered = false;
/*
 * wl1271_init_ieee80211 - fill in the ieee80211_hw / wiphy capabilities
 * (cipher suites, HW flags, interface modes, scan limits, band data and
 * TX queue layout) before registration with mac80211.
 */
5696 static int wl1271_init_ieee80211(struct wl1271 *wl)
/* Cipher suites handled by the firmware, incl. TI's proprietary GEM. */
5699 static const u32 cipher_suites[] = {
5700 WLAN_CIPHER_SUITE_WEP40,
5701 WLAN_CIPHER_SUITE_WEP104,
5702 WLAN_CIPHER_SUITE_TKIP,
5703 WLAN_CIPHER_SUITE_CCMP,
5704 WL1271_CIPHER_SUITE_GEM,
5707 /* The tx descriptor buffer */
5708 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom for TKIP field relocation in TX. */
5710 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5711 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5714 /* FIXME: find a proper value */
5715 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5717 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5718 IEEE80211_HW_SUPPORTS_PS |
5719 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5720 IEEE80211_HW_SUPPORTS_UAPSD |
5721 IEEE80211_HW_HAS_RATE_CONTROL |
5722 IEEE80211_HW_CONNECTION_MONITOR |
5723 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5724 IEEE80211_HW_SPECTRUM_MGMT |
5725 IEEE80211_HW_AP_LINK_PS |
5726 IEEE80211_HW_AMPDU_AGGREGATION |
5727 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5728 IEEE80211_HW_QUEUE_CONTROL;
5730 wl->hw->wiphy->cipher_suites = cipher_suites;
5731 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5733 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5734 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5735 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
/* One SSID per one-shot scan; sched-scan supports up to 16 matches. */
5736 wl->hw->wiphy->max_scan_ssids = 1;
5737 wl->hw->wiphy->max_sched_scan_ssids = 16;
5738 wl->hw->wiphy->max_match_sets = 16;
5740 * Maximum length of elements in scanning probe request templates
5741 * should be the maximum length possible for a template, without
5742 * the IEEE80211 header of the template
5744 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5745 sizeof(struct ieee80211_header);
5747 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5748 sizeof(struct ieee80211_header);
/* Remain-on-channel duration cap, in milliseconds. */
5750 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5752 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5753 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5754 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5756 /* make sure all our channels fit in the scanned_ch bitmask */
5757 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5758 ARRAY_SIZE(wl1271_channels_5ghz) >
5759 WL1271_MAX_CHANNELS);
5761 * clear channel flags from the previous usage
5762 * and restore max_power & max_antenna_gain values.
5764 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5765 wl1271_band_2ghz.channels[i].flags = 0;
5766 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5767 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5770 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5771 wl1271_band_5ghz.channels[i].flags = 0;
5772 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5773 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5777 * We keep local copies of the band structs because we need to
5778 * modify them on a per-device basis.
5780 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5781 sizeof(wl1271_band_2ghz));
/* Overlay the chip-specific HT capabilities onto each band copy. */
5782 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5783 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5784 sizeof(*wl->ht_cap));
5785 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5786 sizeof(wl1271_band_5ghz));
5787 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5788 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5789 sizeof(*wl->ht_cap));
5791 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5792 &wl->bands[IEEE80211_BAND_2GHZ];
5793 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5794 &wl->bands[IEEE80211_BAND_5GHZ];
5797 * allow 4 queues per mac address we support +
5798 * 1 cab queue per mac + one global offchannel Tx queue
5800 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5802 /* the last queue is the offchannel queue */
5803 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5804 wl->hw->max_rates = 1;
5806 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5808 /* the FW answers probe-requests in AP-mode */
5809 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5810 wl->hw->wiphy->probe_resp_offload =
5811 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5812 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5813 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5815 /* allowed interface combinations */
5816 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5817 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5819 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* Per-station / per-vif private data sizes allocated by mac80211. */
5821 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5822 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5824 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw plus the wl1271 private
 * state and its buffers (aggregation buffer, dummy packet, FW log page,
 * event mailbox, 32-bit bounce buffer).  On failure, unwinds through the
 * labels at the bottom and returns ERR_PTR(ret).
 *
 * NOTE(review): many allocation-failure checks and labels are elided in
 * this excerpt; comments annotate only the visible statements.
 */
5829 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5832 struct ieee80211_hw *hw;
5837 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5839 wl1271_error("could not alloc ieee80211_hw");
5845 memset(wl, 0, sizeof(*wl));
/* Lower-driver private area (wl12xx/wl18xx specific, sized by caller). */
5847 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5849 wl1271_error("could not alloc wl priv");
5851 goto err_priv_alloc;
5854 INIT_LIST_HEAD(&wl->wlvif_list);
5859 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5860 * we don't allocate any additional resource here, so that's fine.
5862 for (i = 0; i < NUM_TX_QUEUES; i++)
5863 for (j = 0; j < WLCORE_MAX_LINKS; j++)
5864 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5866 skb_queue_head_init(&wl->deferred_rx_queue);
5867 skb_queue_head_init(&wl->deferred_tx_queue);
/* Work items: ELP, netstack, TX, recovery, scan/ROC completion, watchdog. */
5869 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5870 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5871 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5872 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5873 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5874 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5875 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* Freezable workqueue so queued work is quiesced across system suspend. */
5877 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5878 if (!wl->freezable_wq) {
/* Default runtime state before firmware boot. */
5885 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5886 wl->band = IEEE80211_BAND_2GHZ;
5887 wl->channel_type = NL80211_CHAN_NO_HT;
5889 wl->sg_enabled = true;
5890 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5891 wl->recovery_count = 0;
5894 wl->ap_fw_ps_map = 0;
5896 wl->platform_quirks = 0;
5897 wl->system_hlid = WL12XX_SYSTEM_HLID;
5898 wl->active_sta_count = 0;
5899 wl->active_link_count = 0;
/* FW-log readers sleep on this queue until data arrives (or teardown). */
5901 init_waitqueue_head(&wl->fwlog_waitq);
5903 /* The system link is always allocated */
5904 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5906 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5907 for (i = 0; i < wl->num_tx_desc; i++)
5908 wl->tx_frames[i] = NULL;
5910 spin_lock_init(&wl->wl_lock);
5912 wl->state = WLCORE_STATE_OFF;
5913 wl->fw_type = WL12XX_FW_TYPE_NONE;
5914 mutex_init(&wl->mutex);
5915 mutex_init(&wl->flush_mutex);
5916 init_completion(&wl->nvs_loading_complete);
/* TX aggregation buffer: whole pages, size chosen by the chip driver. */
5918 order = get_order(aggr_buf_size);
5919 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5920 if (!wl->aggr_buf) {
5924 wl->aggr_buf_size = aggr_buf_size;
5926 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5927 if (!wl->dummy_packet) {
5932 /* Allocate one page for the FW log */
5933 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5936 goto err_dummy_packet;
/* Event mailbox; GFP_DMA because it is a bus-transfer target. */
5939 wl->mbox_size = mbox_size;
5940 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5946 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5947 if (!wl->buffer_32) {
/* Error unwind: release resources in reverse order of allocation. */
5958 free_page((unsigned long)wl->fwlog);
5961 dev_kfree_skb(wl->dummy_packet);
5964 free_pages((unsigned long)wl->aggr_buf, order);
5967 destroy_workqueue(wl->freezable_wq);
5970 wl1271_debugfs_exit(wl);
5974 ieee80211_free_hw(hw);
5978 return ERR_PTR(ret);
5980 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * wlcore_free_hw - release everything wlcore_alloc_hw() (and later HW
 * init) allocated, then free the ieee80211_hw itself.
 */
5982 int wlcore_free_hw(struct wl1271 *wl)
5984 /* Unblock any fwlog readers */
5985 mutex_lock(&wl->mutex);
/* fwlog_size == -1 is the sentinel telling blocked readers to give up. */
5986 wl->fwlog_size = -1;
5987 wake_up_interruptible_all(&wl->fwlog_waitq);
5988 mutex_unlock(&wl->mutex);
5990 wlcore_sysfs_free(wl);
5992 kfree(wl->buffer_32);
5994 free_page((unsigned long)wl->fwlog);
5995 dev_kfree_skb(wl->dummy_packet);
5996 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5998 wl1271_debugfs_exit(wl);
6002 wl->fw_type = WL12XX_FW_TYPE_NONE;
/* Status / TX-result bookkeeping buffers allocated during HW init. */
6006 kfree(wl->raw_fw_status);
6007 kfree(wl->fw_status);
6008 kfree(wl->tx_res_if);
6009 destroy_workqueue(wl->freezable_wq);
6012 ieee80211_free_hw(wl->hw);
6016 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * WoWLAN capabilities advertised to cfg80211: wildcard ("any") wakeup
 * plus pattern matching within the firmware's RX-filter limits.
 */
6019 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6020 .flags = WIPHY_WOWLAN_ANY,
6021 .n_patterns = WL1271_MAX_RX_FILTERS,
6022 .pattern_min_len = 1,
6023 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6027 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6029 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - completion callback for the asynchronous NVS
 * (calibration data) firmware request started in wlcore_probe().
 * Finishes device probe: copies the NVS, runs chip setup, installs the
 * IRQ handler, reads HW info and registers with mac80211.  Always
 * signals nvs_loading_complete at the end, success or failure.
 *
 * NOTE(review): several error checks and goto labels are elided in this
 * excerpt; only the visible statements are annotated.
 */
6032 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6034 struct wl1271 *wl = context;
6035 struct platform_device *pdev = wl->pdev;
6036 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6037 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6038 unsigned long irqflags;
6040 irq_handler_t hardirq_fn = NULL;
/* Keep a private copy of the NVS blob; the fw struct is released below. */
6043 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6045 wl1271_error("Could not allocate nvs data");
6048 wl->nvs_len = fw->size;
6050 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6056 ret = wl->ops->setup(wl);
6060 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6062 /* adjust some runtime configuration parameters */
6063 wlcore_adjust_conf(wl);
6065 wl->irq = platform_get_irq(pdev, 0);
6066 wl->platform_quirks = pdata->platform_quirks;
6067 wl->if_ops = pdev_data->if_ops;
/* Edge-triggered platforms need a hard-IRQ top half to wake the thread. */
6069 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6070 irqflags = IRQF_TRIGGER_RISING;
6071 hardirq_fn = wlcore_hardirq;
6073 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6076 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6077 irqflags, pdev->name, wl);
6079 wl1271_error("request_irq() failed: %d", ret);
/* Arm the IRQ as a wakeup source so WoWLAN can resume the system. */
6084 ret = enable_irq_wake(wl->irq);
6086 wl->irq_wake_enabled = true;
6087 device_init_wakeup(wl->dev, 1);
6088 if (pdata->pwr_in_suspend)
6089 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* IRQ stays disabled until the interface is brought up. */
6092 disable_irq(wl->irq);
6094 ret = wl12xx_get_hw_info(wl);
6096 wl1271_error("couldn't get hw info");
6100 ret = wl->ops->identify_chip(wl);
6104 ret = wl1271_init_ieee80211(wl);
6108 ret = wl1271_register_hw(wl);
6112 ret = wlcore_sysfs_init(wl);
6116 wl->initialized = true;
/* Error unwind labels (partially elided in this excerpt). */
6120 wl1271_unregister_hw(wl);
6123 free_irq(wl->irq, wl);
/* Probe finished (either way): release fw and unblock wlcore_remove(). */
6129 release_firmware(fw);
6130 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry called by the wl12xx/wl18xx bus
 * drivers.  Defers the bulk of probing to wlcore_nvs_cb(), which runs
 * once the asynchronous NVS firmware request completes.
 */
6133 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
/* The chip-specific driver must have provided its ops and FW name table. */
6137 if (!wl->ops || !wl->ptable)
6140 wl->dev = &pdev->dev;
6142 platform_set_drvdata(pdev, wl);
6144 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6145 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6148 wl1271_error("request_firmware_nowait failed: %d", ret);
/* Complete even on failure so wlcore_remove() never blocks forever. */
6149 complete_all(&wl->nvs_loading_complete);
6154 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * wlcore_remove - common removal path.  Waits for the asynchronous NVS
 * probe (wlcore_nvs_cb) to finish before tearing anything down, and
 * skips teardown entirely if probing never completed initialization.
 */
6156 int wlcore_remove(struct platform_device *pdev)
6158 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Serialize against wlcore_nvs_cb(), which may still be running. */
6160 wait_for_completion(&wl->nvs_loading_complete);
6161 if (!wl->initialized)
/* Undo the wakeup-source setup done in wlcore_nvs_cb(). */
6164 if (wl->irq_wake_enabled) {
6165 device_init_wakeup(wl->dev, 0);
6166 disable_irq_wake(wl->irq);
6168 wl1271_unregister_hw(wl);
6169 free_irq(wl->irq, wl);
6174 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Runtime-tunable debug bitmask (DEBUG_* flags); exported for the
 * chip-specific modules and adjustable via sysfs (owner read/write). */
6176 u32 wl12xx_debug_level = DEBUG_NONE;
6177 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6178 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6179 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; load-time only (perm 0, not visible in sysfs). */
6181 module_param_named(fwlog, fwlog_param, charp, 0);
6182 MODULE_PARM_DESC(fwlog,
6183 "FW logger options: continuous, ondemand, dbgpins or disable");
/* Remaining knobs default to -1, i.e. "use the driver's conf value". */
6185 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6186 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6188 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6189 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6191 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6192 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6194 MODULE_LICENSE("GPL");
6195 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6196 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6197 MODULE_FIRMWARE(WL12XX_NVS_NAME);