3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/*
 * Number of times the driver retries the chip boot sequence (power-on,
 * firmware download, init) before giving up and reporting failure.
 * NOTE(review): this macro was defined twice with the same value; the
 * duplicate definition has been removed.
 */
#define WL1271_BOOT_RETRIES 3
/*
 * Module parameters consumed by wlcore_adjust_conf():
 *  - fwlog_param: selects the FW logger mode ("continuous", "ondemand",
 *    "dbgpins" or "disable"); NULL means keep the compiled-in default.
 *  - bug_on_recovery / no_recovery: -1 means "not set on the command
 *    line", any other value overrides the corresponding conf field.
 */
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
/* Forward declarations for static helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware that the STA link is now authorized (sets the peer
 * state for wlvif->sta.hlid). Only valid for STA-type vifs that are
 * already associated; the STA_STATE_SENT flag guards against sending
 * the command twice for the same association.
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
86 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: for every enabled 5 GHz channel flagged
 * as radar, force passive scanning and forbid IBSS, then push the new
 * regulatory domain to the firmware if the chip is up.
 */
90 static int wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/* only reconfigure the FW regdomain when the chip is operational */
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming in the firmware via ACX and mirror the
 * result in the vif's RX_STREAMING_STARTED flag. Caller must hold
 * wl->mutex.
 */
117 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
122 /* we should hold wl->mutex */
123 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
128 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
130 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
 * this function is being called when the rx_streaming interval
 * has beed changed or rx_streaming should be disabled
 *
 * Re-evaluates whether RX streaming should stay on (non-zero period,
 * STA associated, and either "always" configured or soft-gemini active)
 * and reprograms or disables it accordingly.
 */
139 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
142 int period = wl->conf.rx_streaming.interval;
144 /* don't reconfigure if rx_streaming is disabled */
145 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
148 /* reconfigure/disable according to new streaming_period */
150 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
151 (wl->conf.rx_streaming.always ||
152 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 ret = wl1271_set_rx_streaming(wl, wlvif, true);
155 ret = wl1271_set_rx_streaming(wl, wlvif, false);
156 /* don't cancel_work_sync since we might deadlock */
157 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item that turns RX streaming on for a vif. Bails out if
 * streaming is already started, the STA is not associated, or the
 * configuration doesn't call for it; otherwise wakes the chip from ELP,
 * enables streaming, and arms the inactivity timer that will turn it
 * back off after conf.rx_streaming.duration ms.
 */
163 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
166 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 rx_streaming_enable_work);
168 struct wl1271 *wl = wlvif->wl;
170 mutex_lock(&wl->mutex);
172 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
173 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
174 (!wl->conf.rx_streaming.always &&
175 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
178 if (!wl->conf.rx_streaming.interval)
181 ret = wl1271_ps_elp_wakeup(wl);
185 ret = wl1271_set_rx_streaming(wl, wlvif, true);
189 /* stop it after some time of inactivity */
190 mod_timer(&wlvif->rx_streaming_timer,
191 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
194 wl1271_ps_elp_sleep(wl);
196 mutex_unlock(&wl->mutex);
/*
 * Work item that turns RX streaming off for a vif (queued by the
 * inactivity timer). No-op if streaming was not started.
 */
199 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
202 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
203 rx_streaming_disable_work);
204 struct wl1271 *wl = wlvif->wl;
206 mutex_lock(&wl->mutex);
208 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
211 ret = wl1271_ps_elp_wakeup(wl);
215 ret = wl1271_set_rx_streaming(wl, wlvif, false);
220 wl1271_ps_elp_sleep(wl);
222 mutex_unlock(&wl->mutex);
/*
 * Timer callback: defer the actual disable to a work item, since bus
 * access cannot be done from (soft)irq context.
 */
225 static void wl1271_rx_streaming_timer(unsigned long data)
227 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
228 struct wl1271 *wl = wlvif->wl;
229 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
/* wl->mutex must be taken */
/*
 * Re-arm the TX stuck-detection watchdog, but only while blocks are
 * still allocated in the FW (i.e. TX is actually in flight).
 */
233 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
235 /* if the watchdog is not armed, don't do anything */
236 if (wl->tx_allocated_blocks == 0)
239 cancel_delayed_work(&wl->tx_watchdog_work);
240 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
241 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires when no TX blocks were released by the FW for
 * tx_watchdog_timeout ms. Benign causes (ROC in progress, active scan,
 * AP buffering frames for dozing stations) just re-arm the watchdog;
 * otherwise TX is considered stuck and a full FW recovery is queued.
 */
244 static void wl12xx_tx_watchdog_work(struct work_struct *work)
246 struct delayed_work *dwork;
249 dwork = container_of(work, struct delayed_work, work);
250 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
252 mutex_lock(&wl->mutex);
254 if (unlikely(wl->state != WLCORE_STATE_ON))
257 /* Tx went out in the meantime - everything is ok */
258 if (unlikely(wl->tx_allocated_blocks == 0))
262 * if a ROC is in progress, we might not have any Tx for a long
263 * time (e.g. pending Tx on the non-ROC channels)
265 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
266 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
267 wl->conf.tx.tx_watchdog_timeout);
268 wl12xx_rearm_tx_watchdog_locked(wl);
273 * if a scan is in progress, we might not have any Tx for a long
276 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
277 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
278 wl->conf.tx.tx_watchdog_timeout);
279 wl12xx_rearm_tx_watchdog_locked(wl);
284 * AP might cache a frame for a long time for a sleeping station,
285 * so rearm the timer if there's an AP interface with stations. If
286 * Tx is genuinely stuck we will most hopefully discover it when all
287 * stations are removed due to inactivity.
289 if (wl->active_sta_count) {
290 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
292 wl->conf.tx.tx_watchdog_timeout,
293 wl->active_sta_count);
294 wl12xx_rearm_tx_watchdog_locked(wl);
298 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
299 wl->conf.tx.tx_watchdog_timeout);
300 wl12xx_queue_recovery_work(wl);
303 mutex_unlock(&wl->mutex);
/*
 * Apply the optional module parameters (fwlog_param, bug_on_recovery,
 * no_recovery) on top of the compiled-in configuration. Called once at
 * setup; unknown fwlog_param strings are reported and ignored.
 */
306 static void wlcore_adjust_conf(struct wl1271 *wl)
308 /* Adjust settings according to optional module parameters */
311 if (!strcmp(fwlog_param, "continuous")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 } else if (!strcmp(fwlog_param, "ondemand")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
315 } else if (!strcmp(fwlog_param, "dbgpins")) {
/* dbgpins implies continuous mode routed to the debug pins */
316 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
318 } else if (!strcmp(fwlog_param, "disable")) {
319 wl->conf.fwlog.mem_blocks = 0;
320 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
322 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/* -1 means the parameter was not given on the command line */
326 if (bug_on_recovery != -1)
327 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
329 if (no_recovery != -1)
330 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation (AP role): start host-side high-level PS for a
 * dozing station that has many packets queued in FW, or end it when the
 * station wakes / drains its queue. The single-station case is exempt
 * because FW memory congestion cannot occur then.
 */
333 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
334 struct wl12xx_vif *wlvif,
337 bool fw_ps, single_sta;
339 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
340 single_sta = (wl->active_sta_count == 1);
343 * Wake up from high level PS if the STA is asleep with too little
344 * packets in FW or if the STA is awake.
346 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
347 wl12xx_ps_link_end(wl, wlvif, hlid);
350 * Start high-level PS if the STA is asleep with enough blocks in FW.
351 * Make an exception if this is the only connected station. In this
352 * case FW-memory congestion is not a problem.
354 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW PS bitmap from the FW status block and
 * run PS regulation for every station link of this AP vif.
 */
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status)
365 /* TODO: also use link_fast_bitmap here */
367 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
368 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
369 wl1271_debug(DEBUG_PSM,
370 "link ps prev 0x%x cur 0x%x changed 0x%x",
371 wl->ap_fw_ps_map, cur_fw_ps_map,
372 wl->ap_fw_ps_map ^ cur_fw_ps_map);
374 wl->ap_fw_ps_map = cur_fw_ps_map;
377 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
378 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
379 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status block over the bus and reconcile the driver's TX
 * accounting with it: freed packets per queue and per link, total freed
 * blocks, available blocks, watchdog re-arm/cancel, AP link PS state,
 * and the host/chipset time offset. All FW counters are small and wrap,
 * hence the masked subtractions below. Caller holds wl->mutex.
 */
382 static int wlcore_fw_status(struct wl1271 *wl,
383 struct wl_fw_status_1 *status_1,
384 struct wl_fw_status_2 *status_2)
386 struct wl12xx_vif *wlvif;
388 u32 old_tx_blk_count = wl->tx_blocks_available;
389 int avail, freed_blocks;
393 struct wl1271_link *lnk;
395 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
396 sizeof(*status_2) + wl->fw_status_priv_len;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
403 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 "drv_rx_counter = %d, tx_results_counter = %d)",
406 status_1->fw_rx_counter,
407 status_1->drv_rx_counter,
408 status_1->tx_results_counter);
410 for (i = 0; i < NUM_TX_QUEUES; i++) {
411 /* prevent wrap-around in freed-packets counter */
412 wl->tx_allocated_pkts[i] -=
413 (status_2->counters.tx_released_pkts[i] -
414 wl->tx_pkts_freed[i]) & 0xff;
416 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
420 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
422 /* prevent wrap-around in freed-packets counter */
423 lnk->allocated_pkts -=
424 (status_2->counters.tx_lnk_free_pkts[i] -
425 lnk->prev_freed_pkts) & 0xff;
427 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
430 /* prevent wrap-around in total blocks counter */
431 if (likely(wl->tx_blocks_freed <=
432 le32_to_cpu(status_2->total_released_blks)))
433 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* counter wrapped: account for the full 32-bit span */
436 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
437 le32_to_cpu(status_2->total_released_blks);
439 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
441 wl->tx_allocated_blocks -= freed_blocks;
444 * If the FW freed some blocks:
445 * If we still have allocated blocks - re-arm the timer, Tx is
446 * not stuck. Otherwise, cancel the timer (no Tx currently).
449 if (wl->tx_allocated_blocks)
450 wl12xx_rearm_tx_watchdog_locked(wl);
452 cancel_delayed_work(&wl->tx_watchdog_work);
455 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
458 * The FW might change the total number of TX memblocks before
459 * we get a notification about blocks being released. Thus, the
460 * available blocks calculation might yield a temporary result
461 * which is lower than the actual available blocks. Keeping in
462 * mind that only blocks that were allocated can be moved from
463 * TX to RX, tx_blocks_available should never decrease here.
465 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
468 /* if more blocks are available now, tx work can be scheduled */
469 if (wl->tx_blocks_available > old_tx_blk_count)
470 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
472 /* for AP update num of allocated TX blocks per link and ps status */
473 wl12xx_for_each_wlvif_ap(wl, wlvif) {
474 wl12xx_irq_update_links_status(wl, wlvif, status_2);
477 /* update the host-chipset time offset */
479 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
480 (s64)le32_to_cpu(status_2->fw_localtime);
/*
 * Drain both deferred skb queues into mac80211: received frames go up
 * the RX path, completed transmissions get their TX status reported.
 */
485 static void wl1271_flush_deferred_work(struct wl1271 *wl)
489 /* Pass all received frames to the network stack */
490 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
491 ieee80211_rx_ni(wl->hw, skb);
493 /* Return sent skbs to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
495 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues; loops until the RX queue
 * stays empty, since flushing may race with new deferrals.
 */
498 static void wl1271_netstack_work(struct work_struct *work)
501 container_of(work, struct wl1271, netstack_work);
504 wl1271_flush_deferred_work(wl);
505 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ service iterations, to avoid starving other work. */
508 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Core interrupt servicing, called with wl->mutex held. Reads the FW
 * status, then dispatches on the pending interrupt bits: watchdog
 * (HW/SW) interrupts trigger recovery and abort further handling; the
 * DATA interrupt drives the RX path, opportunistic inline TX, delayed
 * TX completion and deferred-queue flushing; EVENT_A/B feed the event
 * mailbox handlers. Loops while more interrupts are pending, bounded by
 * WL1271_IRQ_MAX_LOOPS (a single pass on edge-triggered platforms).
 */
510 static int wlcore_irq_locked(struct wl1271 *wl)
514 int loopcount = WL1271_IRQ_MAX_LOOPS;
516 unsigned int defer_count;
520 * In case edge triggered interrupt must be used, we cannot iterate
521 * more than once without introducing race conditions with the hardirq.
523 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
526 wl1271_debug(DEBUG_IRQ, "IRQ work");
528 if (unlikely(wl->state != WLCORE_STATE_ON))
531 ret = wl1271_ps_elp_wakeup(wl);
535 while (!done && loopcount--) {
537 * In order to avoid a race with the hardirq, clear the flag
538 * before acknowledging the chip. Since the mutex is held,
539 * wl1271_ps_elp_wakeup cannot be called concurrently.
541 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
542 smp_mb__after_clear_bit();
544 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
548 wlcore_hw_tx_immediate_compl(wl);
550 intr = le32_to_cpu(wl->fw_status_1->intr);
551 intr &= WLCORE_ALL_INTR_MASK;
557 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
558 wl1271_error("HW watchdog interrupt received! starting recovery.");
559 wl->watchdog_recovery = true;
562 /* restarting the chip. ignore any other interrupt. */
566 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
567 wl1271_error("SW watchdog interrupt received! "
568 "starting recovery.");
569 wl->watchdog_recovery = true;
572 /* restarting the chip. ignore any other interrupt. */
576 if (likely(intr & WL1271_ACX_INTR_DATA)) {
577 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
579 ret = wlcore_rx(wl, wl->fw_status_1);
583 /* Check if any tx blocks were freed */
584 spin_lock_irqsave(&wl->wl_lock, flags);
585 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
586 wl1271_tx_total_queue_count(wl) > 0) {
587 spin_unlock_irqrestore(&wl->wl_lock, flags);
589 * In order to avoid starvation of the TX path,
590 * call the work function directly.
592 ret = wlcore_tx_work_locked(wl);
596 spin_unlock_irqrestore(&wl->wl_lock, flags);
599 /* check for tx results */
600 ret = wlcore_hw_tx_delayed_compl(wl);
604 /* Make sure the deferred queues don't get too long */
605 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
606 skb_queue_len(&wl->deferred_rx_queue);
607 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
608 wl1271_flush_deferred_work(wl);
611 if (intr & WL1271_ACX_INTR_EVENT_A) {
612 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
613 ret = wl1271_event_handle(wl, 0);
618 if (intr & WL1271_ACX_INTR_EVENT_B) {
619 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
620 ret = wl1271_event_handle(wl, 1);
625 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
626 wl1271_debug(DEBUG_IRQ,
627 "WL1271_ACX_INTR_INIT_COMPLETE");
629 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
633 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Cancels any pending TX work (TX may be handled
 * inline by wlcore_irq_locked), services the interrupt under wl->mutex,
 * queues recovery on failure, and re-queues TX work if frames remain
 * queued after the inline handling.
 */
639 static irqreturn_t wlcore_irq(int irq, void *cookie)
643 struct wl1271 *wl = cookie;
645 /* TX might be handled here, avoid redundant work */
646 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
647 cancel_work_sync(&wl->tx_work);
649 mutex_lock(&wl->mutex);
651 ret = wlcore_irq_locked(wl);
653 wl12xx_queue_recovery_work(wl);
655 spin_lock_irqsave(&wl->wl_lock, flags);
656 /* In case TX was not handled here, queue TX work */
657 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
658 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
659 wl1271_tx_total_queue_count(wl) > 0)
660 ieee80211_queue_work(wl->hw, &wl->tx_work);
661 spin_unlock_irqrestore(&wl->wl_lock, flags);
663 mutex_unlock(&wl->mutex);
/*
 * Accumulator for the active-interface iteration below: counts vifs and
 * records whether the given "current" vif is among the running ones.
 */
668 struct vif_counter_data {
671 struct ieee80211_vif *cur_vif;
672 bool cur_vif_running;
/* Iterator callback for ieee80211_iterate_active_interfaces(). */
675 static void wl12xx_vif_count_iter(void *data, u8 *mac,
676 struct ieee80211_vif *vif)
678 struct vif_counter_data *counter = data;
681 if (counter->cur_vif == vif)
682 counter->cur_vif_running = true;
/* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active interfaces and whether cur_vif is already running;
 * results are returned through *data.
 */
686 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
687 struct ieee80211_vif *cur_vif,
688 struct vif_counter_data *data)
690 memset(data, 0, sizeof(*data));
691 data->cur_vif = cur_vif;
693 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
694 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image (PLT, multi-role or
 * single-role — chosen from the cached last_vif_count since wl->mutex
 * is held here), skip the load if that image type is already cached,
 * validate its size, and keep a vmalloc'ed copy in wl->fw.
 */
697 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
699 const struct firmware *fw;
701 enum wl12xx_fw_type fw_type;
705 fw_type = WL12XX_FW_TYPE_PLT;
706 fw_name = wl->plt_fw_name;
709 * we can't call wl12xx_get_vif_count() here because
710 * wl->mutex is taken, so use the cached last_vif_count value
712 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
713 fw_type = WL12XX_FW_TYPE_MULTI;
714 fw_name = wl->mr_fw_name;
716 fw_type = WL12XX_FW_TYPE_NORMAL;
717 fw_name = wl->sr_fw_name;
/* the requested image is already loaded - nothing to do */
721 if (wl->fw_type == fw_type)
724 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
726 ret = request_firmware(&fw, fw_name, wl->dev);
729 wl1271_error("could not get firmware %s: %d", fw_name, ret);
734 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the cached type until the copy below succeeds */
741 wl->fw_type = WL12XX_FW_TYPE_NONE;
742 wl->fw_len = fw->size;
743 wl->fw = vmalloc(wl->fw_len);
746 wl1271_error("could not allocate memory for the firmware");
751 memcpy(wl->fw, fw->data, wl->fw_len);
753 wl->fw_type = fw_type;
755 release_firmware(fw);
/*
 * Schedule FW recovery: move the chip to RESTARTING state, disable
 * further interrupts and queue the recovery work. The state check makes
 * this a no-op if a recovery is already in flight (avoids recursion).
 */
760 void wl12xx_queue_recovery_work(struct wl1271 *wl)
762 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
764 /* Avoid a recursive recovery */
765 if (wl->state == WLCORE_STATE_ON) {
766 wl->state = WLCORE_STATE_RESTARTING;
767 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
768 wlcore_disable_interrupts_nosync(wl);
769 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one FW log memory block to the sysfs fwlog buffer. The block
 * is a sequence of length-prefixed records; walk it to find the real
 * end of the log, then copy at most the room left in the PAGE_SIZE
 * fwlog buffer. Returns the number of bytes copied.
 */
773 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
777 /* The FW log is a length-value list, find where the log end */
778 while (len < maxlen) {
779 if (memblock[len] == 0)
781 if (len + memblock[len] + 1 > maxlen)
783 len += memblock[len] + 1;
786 /* Make sure we have enough room */
787 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
789 /* Fill the FW log file, consumed by the sysfs fwlog entry */
790 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
791 wl->fwlog_size += len;
/* Sentinel "next block" address that terminates a continuous-mode log. */
796 #define WLCORE_FW_LOG_END 0x2000000
/*
 * On a FW panic, walk the FW logger's linked list of memory blocks in
 * chip memory and copy the log into the host fwlog buffer, then wake
 * any reader blocked on the sysfs fwlog entry. Skipped entirely when
 * the logger is unimplemented or configured with zero blocks.
 */
798 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
806 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
807 (wl->conf.fwlog.mem_blocks == 0))
810 wl1271_info("Reading FW panic log");
812 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
817 * Make sure the chip is awake and the logger isn't active.
818 * Do not send a stop fwlog command if the fw is hanged or if
819 * dbgpins are used (due to some fw bug).
821 if (wl1271_ps_elp_wakeup(wl))
823 if (!wl->watchdog_recovery &&
824 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
825 wl12xx_cmd_stop_fwlog(wl);
827 /* Read the first memory block address */
828 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
832 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* continuous mode carries an extra RX descriptor before the data */
836 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
837 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
838 end_of_log = WLCORE_FW_LOG_END;
840 offset = sizeof(addr);
844 /* Traverse the memory blocks linked list */
846 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
847 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
853 * Memory blocks are linked to one another. The first 4 bytes
854 * of each memory block hold the hardware address of the next
855 * one. The last memory block points to the first one in
856 * on demand mode and is equal to 0x2000000 in continuous mode.
858 addr = le32_to_cpup((__le32 *)block);
859 if (!wl12xx_copy_fwlog(wl, block + offset,
860 WL12XX_HW_BLOCK_SIZE - offset))
862 } while (addr && (addr != end_of_log));
864 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log recovery diagnostics: FW version, FW program counter and the
 * raw interrupt status register, plus the running recovery count.
 * Temporarily switches to the BOOT partition to read the registers and
 * restores the WORK partition before returning.
 */
870 static void wlcore_print_recovery(struct wl1271 *wl)
876 wl1271_info("Hardware recovery in progress. FW ver: %s",
877 wl->chip.fw_ver_str);
879 /* change partitions momentarily so we can read the FW pc */
880 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
884 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
888 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
892 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
893 pc, hint_sts, ++wl->recovery_count);
895 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Full FW recovery: dump the panic log and diagnostics (unless the
 * recovery was intended), honor the bug_on_recovery/no_recovery knobs,
 * bump each vif's TX security sequence number to stay ahead of any
 * progress the FW made before dying, tear down every interface, stop
 * the chip and ask mac80211 to restart the hardware.
 */
899 static void wl1271_recovery_work(struct work_struct *work)
902 container_of(work, struct wl1271, recovery_work);
903 struct wl12xx_vif *wlvif;
904 struct ieee80211_vif *vif;
906 mutex_lock(&wl->mutex);
908 if (wl->state == WLCORE_STATE_OFF || wl->plt)
911 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
912 wl12xx_read_fwlog_panic(wl);
913 wlcore_print_recovery(wl);
916 BUG_ON(wl->conf.recovery.bug_on_recovery &&
917 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
919 if (wl->conf.recovery.no_recovery) {
920 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
925 * Advance security sequence number to overcome potential progress
926 * in the firmware during recovery. This doens't hurt if the network is
929 wl12xx_for_each_wlvif(wl, wlvif) {
930 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
931 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
932 wlvif->tx_security_seq +=
933 WL1271_TX_SQN_POST_RECOVERY_PADDING;
936 /* Prevent spurious TX during FW restart */
937 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
939 /* reboot the chipset */
940 while (!list_empty(&wl->wlvif_list)) {
941 wlvif = list_first_entry(&wl->wlvif_list,
942 struct wl12xx_vif, list);
943 vif = wl12xx_wlvif_to_vif(wlvif);
944 __wl1271_op_remove_interface(wl, vif, false);
947 wlcore_op_stop_locked(wl);
949 ieee80211_restart_hw(wl->hw);
952 * Its safe to enable TX now - the queues are stopped after a request
955 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
958 wl->watchdog_recovery = false;
959 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
960 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing to the ELP control register. */
963 static int wlcore_fw_wakeup(struct wl1271 *wl)
965 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffers (status_2 lives directly after
 * status_1 in a single allocation) and the TX result interface buffer.
 * Frees fw_status_1 again if the second allocation fails.
 */
968 static int wl1271_setup(struct wl1271 *wl)
970 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
971 sizeof(*wl->fw_status_2) +
972 wl->fw_status_priv_len, GFP_KERNEL);
973 if (!wl->fw_status_1)
976 wl->fw_status_2 = (struct wl_fw_status_2 *)
977 (((u8 *) wl->fw_status_1) +
978 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
980 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
981 if (!wl->tx_res_if) {
982 kfree(wl->fw_status_1);
/*
 * Power the chip on (with the required pre/post delays), select the
 * BOOT partition and wake the FW from ELP. Powers back off on failure.
 */
989 static int wl12xx_set_power_on(struct wl1271 *wl)
993 msleep(WL1271_PRE_POWER_ON_SLEEP);
994 ret = wl1271_power_on(wl);
997 msleep(WL1271_POWER_ON_SLEEP);
1001 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1005 /* ELP module wake up */
1006 ret = wlcore_fw_wakeup(wl);
1014 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to boot firmware: power on, negotiate
 * the bus block size (dropping the TX alignment quirk if the bus can't
 * do blocksize alignment), allocate the status buffers and fetch the
 * appropriate firmware image (PLT or operational).
 */
1018 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1022 ret = wl12xx_set_power_on(wl);
1027 * For wl127x based devices we could use the default block
1028 * size (512 bytes), but due to a bug in the sdio driver, we
1029 * need to set it explicitly after the chip is powered on. To
1030 * simplify the code and since the performance impact is
1031 * negligible, we use the same block size for all different
1034 * Check if the bus supports blocksize alignment and, if it
1035 * doesn't, make sure we don't have the quirk.
1037 if (!wl1271_set_block_size(wl))
1038 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1040 /* TODO: make sure the lower driver has set things up correctly */
1042 ret = wl1271_setup(wl);
1046 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (Production Line Testing) mode: boot the chip with the PLT
 * firmware, retrying up to WL1271_BOOT_RETRIES times. Only legal from
 * the OFF state. On success the chip is left in WLCORE_STATE_ON with
 * hw/fw version info published in the wiphy; on failure the chip is
 * powered off and PLT mode is cleared.
 */
1054 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1056 int retries = WL1271_BOOT_RETRIES;
1057 struct wiphy *wiphy = wl->hw->wiphy;
1059 static const char* const PLT_MODE[] = {
1067 mutex_lock(&wl->mutex);
1069 wl1271_notice("power up");
1071 if (wl->state != WLCORE_STATE_OFF) {
1072 wl1271_error("cannot go into PLT state because not "
1073 "in off state: %d", wl->state);
1078 /* Indicate to lower levels that we are now in PLT mode */
1080 wl->plt_mode = plt_mode;
1084 ret = wl12xx_chip_wakeup(wl, true);
1088 ret = wl->ops->plt_init(wl);
1092 wl->state = WLCORE_STATE_ON;
1093 wl1271_notice("firmware booted in PLT mode %s (%s)",
1095 wl->chip.fw_ver_str);
1097 /* update hw/fw version info in wiphy struct */
1098 wiphy->hw_version = wl->chip.id;
1099 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1100 sizeof(wiphy->fw_version));
1105 wl1271_power_off(wl);
1109 wl->plt_mode = PLT_OFF;
1111 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1112 WL1271_BOOT_RETRIES);
1114 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * before the state change so the IRQ handler cannot observe a
 * half-torn-down chip; all pending work is flushed/cancelled outside
 * the mutex before the final power-off.
 */
1119 int wl1271_plt_stop(struct wl1271 *wl)
1123 wl1271_notice("power down");
1126 * Interrupts must be disabled before setting the state to OFF.
1127 * Otherwise, the interrupt handler might be called and exit without
1128 * reading the interrupt status.
1130 wlcore_disable_interrupts(wl);
1131 mutex_lock(&wl->mutex);
1133 mutex_unlock(&wl->mutex);
1136 * This will not necessarily enable interrupts as interrupts
1137 * may have been disabled when op_stop was called. It will,
1138 * however, balance the above call to disable_interrupts().
1140 wlcore_enable_interrupts(wl);
1142 wl1271_error("cannot power down because not in PLT "
1143 "state: %d", wl->state);
1148 mutex_unlock(&wl->mutex);
/* flush deferred work and cancel all pending work items */
1150 wl1271_flush_deferred_work(wl);
1151 cancel_work_sync(&wl->netstack_work);
1152 cancel_work_sync(&wl->recovery_work);
1153 cancel_delayed_work_sync(&wl->elp_work);
1154 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1156 mutex_lock(&wl->mutex);
1157 wl1271_power_off(wl);
1159 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1160 wl->state = WLCORE_STATE_OFF;
1162 wl->plt_mode = PLT_OFF;
1164 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point. Maps the skb to a HW link (hlid) and AC
 * queue, drops it if the link is invalid or the queue is hard-stopped
 * (watermark stops are soft and let packets through), otherwise queues
 * it on the per-link queue, applies the high-watermark flow control,
 * and kicks the TX work unless TX is already being handled inline by
 * the interrupt path.
 */
1170 static void wl1271_op_tx(struct ieee80211_hw *hw,
1171 struct ieee80211_tx_control *control,
1172 struct sk_buff *skb)
1174 struct wl1271 *wl = hw->priv;
1175 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1176 struct ieee80211_vif *vif = info->control.vif;
1177 struct wl12xx_vif *wlvif = NULL;
1178 unsigned long flags;
1183 wlvif = wl12xx_vif_to_data(vif);
1185 mapping = skb_get_queue_mapping(skb);
1186 q = wl1271_tx_get_queue(mapping);
1188 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1190 spin_lock_irqsave(&wl->wl_lock, flags);
1193 * drop the packet if the link is invalid or the queue is stopped
1194 * for any reason but watermark. Watermark is a "soft"-stop so we
1195 * allow these packets through.
1197 if (hlid == WL12XX_INVALID_LINK_ID ||
1198 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1199 (wlcore_is_queue_stopped(wl, q) &&
1200 !wlcore_is_queue_stopped_by_reason(wl, q,
1201 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1202 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1203 ieee80211_free_txskb(hw, skb);
1207 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1209 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1211 wl->tx_queue_count[q]++;
1213 wlvif->tx_queue_count[q]++;
1216 * The workqueue is slow to process the tx_queue and we need stop
1217 * the queue here, otherwise the queue will get too long.
1219 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1220 !wlcore_is_queue_stopped_by_reason(wl, q,
1221 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1222 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1223 wlcore_stop_queue_locked(wl, q,
1224 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1228 * The chip specific setup must run before the first TX packet -
1229 * before that, the tx_work will not be initialized!
1232 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1233 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1234 ieee80211_queue_work(wl->hw, &wl->tx_work);
1237 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low
 * on RX memory blocks. Only one may be pending at a time; it is sent
 * immediately when TX is idle, otherwise left for the threaded IRQ
 * handler to pick up.
 */
1240 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1242 unsigned long flags;
1245 /* no need to queue a new dummy packet if one is already pending */
1246 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1249 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1251 spin_lock_irqsave(&wl->wl_lock, flags);
1252 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1253 wl->tx_queue_count[q]++;
1254 spin_unlock_irqrestore(&wl->wl_lock, flags);
1256 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1257 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1258 return wlcore_tx_work_locked(wl);
1261 * If the FW TX is busy, TX work will be scheduled by the threaded
1262 * interrupt handler function
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512 bytes
 * boundaries could be beneficial, performance wise
 */
1272 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed NULLFUNC data frame (ToDS)
 * padded to TOTAL_TX_DUMMY_PACKET_SIZE, with room reserved up front
 * for the TX HW descriptor. Returns NULL on allocation failure.
 */
1274 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1276 struct sk_buff *skb;
1277 struct ieee80211_hdr_3addr *hdr;
1278 unsigned int dummy_packet_size;
1280 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1281 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1283 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1285 wl1271_warning("Failed to allocate a dummy packet skb");
1289 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1291 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1292 memset(hdr, 0, sizeof(*hdr));
1293 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1294 IEEE80211_STYPE_NULLFUNC |
1295 IEEE80211_FCTL_TODS);
1297 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1299 /* Dummy packets require the TID to be management */
1300 skb->priority = WL1271_TID_MGMT;
1302 /* Initialize all fields that might be used */
1303 skb_set_queue_mapping(skb, 0);
1304 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN wake pattern can be expressed as FW RX-filter
 * fields: counts the contiguous masked-in segments (a segment crossing
 * the Ethernet/IP header boundary is split into two fields) and checks
 * both the field count and the flattened fields-buffer size against the
 * FW limits.
 */
1312 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1314 int num_fields = 0, in_field = 0, fields_size = 0;
1315 int i, pattern_len = 0;
1318 wl1271_warning("No mask in WoWLAN pattern");
1323 * The pattern is broken up into segments of bytes at different offsets
1324 * that need to be checked by the FW filter. Each segment is called
1325 * a field in the FW API. We verify that the total number of fields
1326 * required for this pattern won't exceed FW limits (8)
1327 * as well as the total fields buffer won't exceed the FW limit.
1328 * Note that if there's a pattern which crosses Ethernet/IP header
1329 * boundary a new field is required.
1331 for (i = 0; i < p->pattern_len; i++) {
1332 if (test_bit(i, (unsigned long *)p->mask)) {
1337 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1339 fields_size += pattern_len +
1340 RX_FILTER_FIELD_OVERHEAD;
1348 fields_size += pattern_len +
1349 RX_FILTER_FIELD_OVERHEAD;
/* account for a segment still open at the end of the pattern */
1356 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1360 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1361 wl1271_warning("RX Filter too complex. Too many segments");
1365 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1366 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees with wl1271_rx_filter_free(). */
1373 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1375 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including each field's separately allocated pattern. */
1378 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1385 for (i = 0; i < filter->num_fields; i++)
1386 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset + flags + pattern bytes) to an RX filter.
 * The pattern is copied into a freshly allocated buffer owned by the
 * filter; fails if the filter already holds the FW maximum of fields.
 */
1391 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1392 u16 offset, u8 flags,
1393 u8 *pattern, u8 len)
1395 struct wl12xx_rx_filter_field *field;
1397 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1398 wl1271_warning("Max fields per RX filter. can't alloc another");
1402 field = &filter->fields[filter->num_fields];
1404 field->pattern = kzalloc(len, GFP_KERNEL);
1405 if (!field->pattern) {
1406 wl1271_warning("Failed to allocate RX filter pattern");
1410 filter->num_fields++;
/* offset is stored little-endian, as expected by the FW */
1412 field->offset = cpu_to_le16(offset);
1413 field->flags = flags;
1415 memcpy(field->pattern, pattern, len);
/*
 * Total byte size of the filter's fields once flattened for the FW:
 * per field, the struct minus the pattern pointer plus the actual
 * pattern length (matches wl1271_rx_filter_flatten_fields() layout).
 */
1420 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1422 int i, fields_size = 0;
1424 for (i = 0; i < filter->num_fields; i++)
1425 fields_size += filter->fields[i].len +
1426 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a contiguous buffer for the FW:
 * each field header is followed inline by its pattern bytes (the
 * pattern pointer member is replaced by the data itself). The caller
 * must size buf via wl1271_rx_filter_get_fields_size().
 */
1432 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1436 struct wl12xx_rx_filter_field *field;
1438 for (i = 0; i < filter->num_fields; i++) {
1439 field = (struct wl12xx_rx_filter_field *)buf;
1441 field->offset = filter->fields[i].offset;
1442 field->flags = filter->fields[i].flags;
1443 field->len = filter->fields[i].len;
/* copy pattern bytes in place of the pointer member */
1445 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1446 buf += sizeof(struct wl12xx_rx_filter_field) -
1447 sizeof(u8 *) + field->len;
1452 * Allocates an RX filter returned through f
1453 * which needs to be freed using rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN pattern into a FW RX filter: each
 * contiguous masked-in run of pattern bytes becomes one filter field,
 * split at the Ethernet/IP header boundary, with the offset rebased
 * per header. On error the partially built filter is freed.
 */
1455 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1456 struct cfg80211_wowlan_trig_pkt_pattern *p,
1457 struct wl12xx_rx_filter **f)
1460 struct wl12xx_rx_filter *filter;
1464 filter = wl1271_rx_filter_alloc();
1466 wl1271_warning("Failed to alloc rx filter");
1472 while (i < p->pattern_len) {
/* skip bytes not covered by the mask */
1473 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of this contiguous masked-in segment */
1478 for (j = i; j < p->pattern_len; j++) {
1479 if (!test_bit(j, (unsigned long *)p->mask))
/* force a field break at the Ethernet/IP header boundary */
1482 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1483 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1487 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1489 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields are offset relative to the IP header start */
1491 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1492 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1497 ret = wl1271_rx_filter_alloc_field(filter,
1500 &p->pattern[i], len);
/* a matching packet should wake the host */
1507 filter->action = FILTER_SIGNAL;
1513 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the WoWLAN configuration.
 * With no config (or "any" / no patterns): restore signal-everything
 * default and clear all filters. Otherwise: validate every pattern
 * first, clear FW state, install one filter per pattern, and set the
 * default action to DROP so only matching packets wake the host.
 */
1519 static int wl1271_configure_wowlan(struct wl1271 *wl,
1520 struct cfg80211_wowlan *wow)
1524 if (!wow || wow->any || !wow->n_patterns) {
1525 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1530 ret = wl1271_rx_filter_clear_all(wl);
1537 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1540 /* Validate all incoming patterns before clearing current FW state */
1541 for (i = 0; i < wow->n_patterns; i++) {
1542 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1544 wl1271_warning("Bad wowlan pattern %d", i);
1549 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1553 ret = wl1271_rx_filter_clear_all(wl);
1557 /* Translate WoWLAN patterns into filters */
1558 for (i = 0; i < wow->n_patterns; i++) {
1559 struct cfg80211_wowlan_trig_pkt_pattern *p;
1560 struct wl12xx_rx_filter *filter = NULL;
1562 p = &wow->patterns[i];
1564 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1566 wl1271_warning("Failed to create an RX filter from "
1567 "wowlan pattern %d", i);
1571 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; free ours regardless of outcome */
1573 wl1271_rx_filter_free(filter);
/* non-matching traffic is dropped while suspended */
1578 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare a STA vif for host suspend: wake the chip from ELP,
 * install the WoWLAN filters, and switch to the suspend-specific
 * wake-up conditions/listen interval if they differ from the
 * normal runtime values. No-op when not associated.
 */
1584 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1585 struct wl12xx_vif *wlvif,
1586 struct cfg80211_wowlan *wow)
1590 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1593 ret = wl1271_ps_elp_wakeup(wl);
1597 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX if suspend settings equal the runtime settings */
1601 if ((wl->conf.conn.suspend_wake_up_event ==
1602 wl->conf.conn.wake_up_event) &&
1603 (wl->conf.conn.suspend_listen_interval ==
1604 wl->conf.conn.listen_interval))
1607 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1608 wl->conf.conn.suspend_wake_up_event,
1609 wl->conf.conn.suspend_listen_interval);
1612 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1615 wl1271_ps_elp_sleep(wl);
/*
 * Prepare an AP vif for host suspend: enable FW beacon filtering so
 * the host is not woken for every beacon. No-op if the AP was never
 * started.
 */
1621 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1622 struct wl12xx_vif *wlvif)
1626 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1629 ret = wl1271_ps_elp_wakeup(wl);
1633 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1635 wl1271_ps_elp_sleep(wl);
/*
 * Dispatch per-vif suspend configuration by BSS type (STA vs AP);
 * other vif types need no suspend-time configuration.
 */
1641 static int wl1271_configure_suspend(struct wl1271 *wl,
1642 struct wl12xx_vif *wlvif,
1643 struct cfg80211_wowlan *wow)
1645 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1646 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1647 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1648 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo suspend-time configuration on resume: for STA, remove the
 * WoWLAN filters and restore the runtime wake-up conditions; for AP,
 * disable beacon filtering again. Errors are logged but not
 * propagated (resume is best-effort).
 */
1652 static void wl1271_configure_resume(struct wl1271 *wl,
1653 struct wl12xx_vif *wlvif)
1656 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1657 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1659 if ((!is_ap) && (!is_sta))
1662 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1665 ret = wl1271_ps_elp_wakeup(wl);
/* NULL wow config clears the FW RX filters */
1670 wl1271_configure_wowlan(wl, NULL);
/* skip the ACX if suspend settings equal the runtime settings */
1672 if ((wl->conf.conn.suspend_wake_up_event ==
1673 wl->conf.conn.wake_up_event) &&
1674 (wl->conf.conn.suspend_listen_interval ==
1675 wl->conf.conn.listen_interval))
1678 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1679 wl->conf.conn.wake_up_event,
1680 wl->conf.conn.listen_interval);
1683 wl1271_error("resume: wake up conditions failed: %d",
1687 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1691 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend handler. Refuses to suspend while a HW recovery is
 * pending, flushes TX, configures every vif for wowlan suspend, then
 * quiesces the driver: flush deferred work, set the SUSPENDED flag
 * (with interrupts disabled so no new irq work races in), and flush
 * the remaining work items.
 */
1694 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1695 struct cfg80211_wowlan *wow)
1697 struct wl1271 *wl = hw->priv;
1698 struct wl12xx_vif *wlvif;
1701 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1704 /* we want to perform the recovery before suspending */
1705 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1706 wl1271_warning("postponing suspend to perform recovery");
1710 wl1271_tx_flush(wl);
1712 mutex_lock(&wl->mutex);
1713 wl->wow_enabled = true;
1714 wl12xx_for_each_wlvif(wl, wlvif) {
1715 ret = wl1271_configure_suspend(wl, wlvif, wow);
1717 mutex_unlock(&wl->mutex);
1718 wl1271_warning("couldn't prepare device to suspend");
1722 mutex_unlock(&wl->mutex);
1723 /* flush any remaining work */
1724 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1727 * disable and re-enable interrupts in order to flush
1730 wlcore_disable_interrupts(wl);
1733 * set suspended flag to avoid triggering a new threaded_irq
1734 * work. no need for spinlock as interrupts are disabled.
1736 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1738 wlcore_enable_interrupts(wl);
1739 flush_work(&wl->tx_work);
1740 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 resume handler. Clears the SUSPENDED flag under the
 * spinlock, runs any irq work that was postponed during suspend
 * (unless a recovery is pending, in which case the recovery work is
 * queued instead of touching the HW), then restores each vif's
 * runtime configuration.
 */
1745 static int wl1271_op_resume(struct ieee80211_hw *hw)
1747 struct wl1271 *wl = hw->priv;
1748 struct wl12xx_vif *wlvif;
1749 unsigned long flags;
1750 bool run_irq_work = false, pending_recovery;
1753 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1755 WARN_ON(!wl->wow_enabled);
1758 * re-enable irq_work enqueuing, and call irq_work directly if
1759 * there is a pending work.
1761 spin_lock_irqsave(&wl->wl_lock, flags);
1762 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1763 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1764 run_irq_work = true;
1765 spin_unlock_irqrestore(&wl->wl_lock, flags);
1767 mutex_lock(&wl->mutex);
1769 /* test the recovery flag before calling any SDIO functions */
1770 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1774 wl1271_debug(DEBUG_MAC80211,
1775 "run postponed irq_work directly");
1777 /* don't talk to the HW if recovery is pending */
1778 if (!pending_recovery) {
1779 ret = wlcore_irq_locked(wl);
1781 wl12xx_queue_recovery_work(wl);
1784 wlcore_enable_interrupts(wl);
1787 if (pending_recovery) {
1788 wl1271_warning("queuing forgotten recovery on resume");
1789 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1793 wl12xx_for_each_wlvif(wl, wlvif) {
1794 wl1271_configure_resume(wl, wlvif);
1798 wl->wow_enabled = false;
1799 mutex_unlock(&wl->mutex);
/*
 * mac80211 start handler. Intentionally does no HW bring-up: booting
 * is deferred to add_interface, as explained below.
 */
1805 static int wl1271_op_start(struct ieee80211_hw *hw)
1807 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1810 * We have to delay the booting of the hardware because
1811 * we need to know the local MAC address before downloading and
1812 * initializing the firmware. The MAC address cannot be changed
1813 * after boot, and without the proper MAC address, the firmware
1814 * will not function properly.
1816 * The MAC address is first known when the corresponding interface
1817 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held. Moves state to OFF first (so
 * work functions become no-ops), disables interrupts, drops the mutex
 * to cancel/flush all outstanding work, then re-takes it to reset TX,
 * power off, and reinitialize all per-device bookkeeping to defaults.
 * Temporarily releasing the mutex here is safe because the OFF state
 * makes concurrent operations fail early.
 */
1823 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just balance a recovery-time interrupt disable */
1827 if (wl->state == WLCORE_STATE_OFF) {
1828 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1830 wlcore_enable_interrupts(wl);
1836 * this must be before the cancel_work calls below, so that the work
1837 * functions don't perform further work.
1839 wl->state = WLCORE_STATE_OFF;
1842 * Use the nosync variant to disable interrupts, so the mutex could be
1843 * held while doing so without deadlocking.
1845 wlcore_disable_interrupts_nosync(wl);
1847 mutex_unlock(&wl->mutex);
1849 wlcore_synchronize_interrupts(wl);
/* don't cancel the recovery work from within the recovery itself */
1850 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1851 cancel_work_sync(&wl->recovery_work);
1852 wl1271_flush_deferred_work(wl);
1853 cancel_delayed_work_sync(&wl->scan_complete_work);
1854 cancel_work_sync(&wl->netstack_work);
1855 cancel_work_sync(&wl->tx_work);
1856 cancel_delayed_work_sync(&wl->elp_work);
1857 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1859 /* let's notify MAC80211 about the remaining pending TX frames */
1860 mutex_lock(&wl->mutex);
1861 wl12xx_tx_reset(wl);
1863 wl1271_power_off(wl);
1865 * In case a recovery was scheduled, interrupts were disabled to avoid
1866 * an interrupt storm. Now that the power is down, it is safe to
1867 * re-enable interrupts to balance the disable depth
1869 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1870 wlcore_enable_interrupts(wl);
/* reset per-device state back to power-on defaults */
1872 wl->band = IEEE80211_BAND_2GHZ;
1875 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1876 wl->channel_type = NL80211_CHAN_NO_HT;
1877 wl->tx_blocks_available = 0;
1878 wl->tx_allocated_blocks = 0;
1879 wl->tx_results_count = 0;
1880 wl->tx_packets_count = 0;
1881 wl->time_offset = 0;
1882 wl->ap_fw_ps_map = 0;
1884 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1885 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1886 memset(wl->links_map, 0, sizeof(wl->links_map));
1887 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1888 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1889 wl->active_sta_count = 0;
1891 /* The system link is always allocated */
1892 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1893 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1894 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1897 * this is performed after the cancel_work calls and the associated
1898 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1899 * get executed before all these vars have been reset.
1903 wl->tx_blocks_freed = 0;
1905 for (i = 0; i < NUM_TX_QUEUES; i++) {
1906 wl->tx_pkts_freed[i] = 0;
1907 wl->tx_allocated_pkts[i] = 0;
1910 wl1271_debugfs_reset(wl);
/*
 * NOTE(review): fw_status_2 is only NULLed, not freed — presumably it
 * points into the fw_status_1 allocation; confirm against the alloc site.
 */
1912 kfree(wl->fw_status_1);
1913 wl->fw_status_1 = NULL;
1914 wl->fw_status_2 = NULL;
1915 kfree(wl->tx_res_if);
1916 wl->tx_res_if = NULL;
1917 kfree(wl->target_mem_map);
1918 wl->target_mem_map = NULL;
1921 * FW channels must be re-calibrated after recovery,
1922 * clear the last Reg-Domain channel configuration.
1924 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop handler: take the mutex and run the locked stop path. */
1927 static void wlcore_op_stop(struct ieee80211_hw *hw)
1929 struct wl1271 *wl = hw->priv;
1931 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1933 mutex_lock(&wl->mutex);
1935 wlcore_op_stop_locked(wl);
1937 mutex_unlock(&wl->mutex);
/*
 * Delayed work: channel-switch timeout. If the switch is still marked
 * in progress, report failure to mac80211 via ieee80211_chswitch_done()
 * and tell the FW to abort the switch.
 */
1940 static void wlcore_channel_switch_work(struct work_struct *work)
1942 struct delayed_work *dwork;
1944 struct ieee80211_vif *vif;
1945 struct wl12xx_vif *wlvif;
1948 dwork = container_of(work, struct delayed_work, work);
1949 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1952 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1954 mutex_lock(&wl->mutex);
1956 if (unlikely(wl->state != WLCORE_STATE_ON))
1959 /* check the channel switch is still ongoing */
1960 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1963 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = the channel switch did not complete successfully */
1964 ieee80211_chswitch_done(vif, false);
1966 ret = wl1271_ps_elp_wakeup(wl);
1970 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1972 wl1271_ps_elp_sleep(wl);
1974 mutex_unlock(&wl->mutex);
/*
 * Delayed work: beacon-loss grace period expired. If the STA is still
 * marked associated, notify mac80211 of the connection loss.
 */
1977 static void wlcore_connection_loss_work(struct work_struct *work)
1979 struct delayed_work *dwork;
1981 struct ieee80211_vif *vif;
1982 struct wl12xx_vif *wlvif;
1984 dwork = container_of(work, struct delayed_work, work);
1985 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1988 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1990 mutex_lock(&wl->mutex);
1992 if (unlikely(wl->state != WLCORE_STATE_ON))
1995 /* Call mac80211 connection loss */
1996 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1999 vif = wl12xx_wlvif_to_vif(wlvif);
2000 ieee80211_connection_loss(vif);
2002 mutex_unlock(&wl->mutex);
/*
 * Allocate a free rate-policy slot from the device bitmap and return
 * its index through *idx; fails when all slots are in use.
 */
2005 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2007 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2008 WL12XX_MAX_RATE_POLICIES);
2009 if (policy >= WL12XX_MAX_RATE_POLICIES)
2012 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and set *idx to the out-of-range
 * sentinel value so a double free is caught by the WARN_ON.
 */
2017 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2019 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2022 __clear_bit(*idx, wl->rate_policies_map);
2023 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Allocate a free keep-alive (KLV) template slot from the device
 * bitmap and return its index through *idx; fails when full.
 */
2026 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2028 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2029 WLCORE_MAX_KLV_TEMPLATES);
2030 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2033 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive template slot and poison *idx with the
 * out-of-range sentinel so a double free trips the WARN_ON.
 */
2038 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2040 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2043 __clear_bit(*idx, wl->klv_templates_map);
2044 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (and its P2P flavor) to the FW role constant;
 * returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
2047 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2049 switch (wlvif->bss_type) {
2050 case BSS_TYPE_AP_BSS:
2052 return WL1271_ROLE_P2P_GO;
2054 return WL1271_ROLE_AP;
2056 case BSS_TYPE_STA_BSS:
2058 return WL1271_ROLE_P2P_CL;
2060 return WL1271_ROLE_STA;
2063 return WL1271_ROLE_IBSS;
2066 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2068 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver data for a newly added interface:
 * derive bss_type from the mac80211 vif type, invalidate role/link
 * ids, allocate rate policies (and, for STA/IBSS, a keep-alive
 * template), seed rate sets, copy the globally-configured band/
 * channel/power from wl, and set up the vif's work items and timer.
 */
2071 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2073 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2076 /* clear everything but the persistent data */
2077 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2079 switch (ieee80211_vif_type_p2p(vif)) {
2080 case NL80211_IFTYPE_P2P_CLIENT:
2083 case NL80211_IFTYPE_STATION:
2084 wlvif->bss_type = BSS_TYPE_STA_BSS;
2086 case NL80211_IFTYPE_ADHOC:
2087 wlvif->bss_type = BSS_TYPE_IBSS;
2089 case NL80211_IFTYPE_P2P_GO:
2092 case NL80211_IFTYPE_AP:
2093 wlvif->bss_type = BSS_TYPE_AP_BSS;
2096 wlvif->bss_type = MAX_BSS_TYPE;
/* no FW roles/links assigned yet */
2100 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2101 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2102 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2104 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2105 wlvif->bss_type == BSS_TYPE_IBSS) {
2106 /* init sta/ibss data */
2107 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2108 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2109 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2110 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2111 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2112 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2113 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2114 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP: per-AC unicast policies plus mgmt/broadcast policies */
2117 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2118 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2119 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2120 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2121 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2122 wl12xx_allocate_rate_policy(wl,
2123 &wlvif->ap.ucast_rate_idx[i]);
2124 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2126 * TODO: check if basic_rate shouldn't be
2127 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2128 * instead (the same thing for STA above).
2130 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2131 /* TODO: this seems to be used only for STA, check it */
2132 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2135 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2136 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2137 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2140 * mac80211 configures some values globally, while we treat them
2141 * per-interface. thus, on init, we have to copy them from wl
2143 wlvif->band = wl->band;
2144 wlvif->channel = wl->channel;
2145 wlvif->power_level = wl->power_level;
2146 wlvif->channel_type = wl->channel_type;
2148 INIT_WORK(&wlvif->rx_streaming_enable_work,
2149 wl1271_rx_streaming_enable_work);
2150 INIT_WORK(&wlvif->rx_streaming_disable_work,
2151 wl1271_rx_streaming_disable_work);
2152 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2153 wlcore_channel_switch_work);
2154 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2155 wlcore_connection_loss_work);
2156 INIT_LIST_HEAD(&wlvif->list);
2158 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2159 (unsigned long) wlvif);
/*
 * Bring up the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times (wakeup -> ops->boot -> hw_init). On
 * success, publish fw/hw version info to wiphy, disable the 5 GHz
 * band if the NVS says 11a is unsupported, and move the device to
 * WLCORE_STATE_ON. Returns whether the boot succeeded.
 */
2163 static bool wl12xx_init_fw(struct wl1271 *wl)
2165 int retries = WL1271_BOOT_RETRIES;
2166 bool booted = false;
2167 struct wiphy *wiphy = wl->hw->wiphy;
2172 ret = wl12xx_chip_wakeup(wl, false);
2176 ret = wl->ops->boot(wl);
2180 ret = wl1271_hw_init(wl);
/* boot attempt failed: quiesce and power off before retrying */
2188 mutex_unlock(&wl->mutex);
2189 /* Unlocking the mutex in the middle of handling is
2190 inherently unsafe. In this case we deem it safe to do,
2191 because we need to let any possibly pending IRQ out of
2192 the system (and while we are WLCORE_STATE_OFF the IRQ
2193 work function will not do anything.) Also, any other
2194 possible concurrent operations will fail due to the
2195 current state, hence the wl1271 struct should be safe. */
2196 wlcore_disable_interrupts(wl);
2197 wl1271_flush_deferred_work(wl);
2198 cancel_work_sync(&wl->netstack_work);
2199 mutex_lock(&wl->mutex);
2201 wl1271_power_off(wl);
2205 wl1271_error("firmware boot failed despite %d retries",
2206 WL1271_BOOT_RETRIES);
2210 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2212 /* update hw/fw version info in wiphy struct */
2213 wiphy->hw_version = wl->chip.id;
2214 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2215 sizeof(wiphy->fw_version));
2218 * Now we know if 11a is supported (info from the NVS), so disable
2219 * 11a channels if not supported
2221 if (!wl->enable_11a)
2222 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2224 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2225 wl->enable_11a ? "" : "not ");
2227 wl->state = WLCORE_STATE_ON;
/* True if the vif's device role holds a valid FW link (i.e. was started). */
2232 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2234 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2238 * Check whether a fw switch (i.e. moving from one loaded
2239 * fw to another) is needed. This function is also responsible
2240 * for updating wl->last_vif_count, so it must be called before
2241 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2244 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2245 struct vif_counter_data vif_counter_data,
2248 enum wl12xx_fw_type current_fw = wl->fw_type;
2249 u8 vif_count = vif_counter_data.counter;
/* never switch fw in the middle of an interface-type change */
2251 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2254 /* increase the vif count if this is a new vif */
2255 if (add && !vif_counter_data.cur_vif_running)
2258 wl->last_vif_count = vif_count;
2260 /* no need for fw change if the device is OFF */
2261 if (wl->state == WLCORE_STATE_OFF)
2264 /* no need for fw change if a single fw is used */
2265 if (!wl->mr_fw_name)
/* switch between single-role and multi-role fw by vif count */
2268 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2270 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2277 * Enter "forced psm". Make sure the sta is in psm against the ap,
2278 * to make the fw switch a bit more disconnection-persistent.
2280 static void wl12xx_force_active_psm(struct wl1271 *wl)
2282 struct wl12xx_vif *wlvif;
/* apply power-save mode to every STA vif before the fw switch */
2284 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2285 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * mac80211 add_interface handler. Initializes the per-vif data,
 * resolves the FW role type, performs a fw switch (via intended
 * recovery) if the new vif count requires it, boots the firmware on
 * first use (the MAC address is only known here — see op_start), then
 * enables the FW role and applies vif-specific init.
 */
2289 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2290 struct ieee80211_vif *vif)
2292 struct wl1271 *wl = hw->priv;
2293 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2294 struct vif_counter_data vif_count;
2297 bool booted = false;
2299 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2300 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2302 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2303 ieee80211_vif_type_p2p(vif), vif->addr);
2305 wl12xx_get_vif_count(hw, vif, &vif_count);
2307 mutex_lock(&wl->mutex);
2308 ret = wl1271_ps_elp_wakeup(wl);
2313 * in some very corner case HW recovery scenarios its possible to
2314 * get here before __wl1271_op_remove_interface is complete, so
2315 * opt out if that is the case.
2317 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2318 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2324 ret = wl12xx_init_vif_data(wl, vif);
2329 role_type = wl12xx_get_role_type(wl, wlvif);
2330 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
/* switch single-role/multi-role fw via a deliberate recovery */
2335 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2336 wl12xx_force_active_psm(wl);
2337 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2338 mutex_unlock(&wl->mutex);
2339 wl1271_recovery_work(&wl->recovery_work);
2344 * TODO: after the nvs issue will be solved, move this block
2345 * to start(), and make sure here the driver is ON.
2347 if (wl->state == WLCORE_STATE_OFF) {
2349 * we still need this in order to configure the fw
2350 * while uploading the nvs
2352 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2354 booted = wl12xx_init_fw(wl);
2361 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2362 role_type, &wlvif->role_id);
2366 ret = wl1271_init_vif_specific(wl, vif);
2370 list_add(&wlvif->list, &wl->wlvif_list);
2371 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2373 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2378 wl1271_ps_elp_sleep(wl);
2380 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held: complete/abort any scan or
 * sched-scan owned by it, disable its FW roles (unless a recovery is
 * in progress, in which case the HW is not touched), release its rate
 * policies, keep-alive template and links, unlink it from the vif
 * list, and — if this was the last AP with stations remaining —
 * re-adjust sleep auth. The mutex is dropped at the end to cancel the
 * vif's work items and timer synchronously.
 */
2385 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2386 struct ieee80211_vif *vif,
2387 bool reset_tx_queues)
2389 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2391 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2393 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2395 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2398 /* because of hardware recovery, we may get here twice */
2399 if (wl->state == WLCORE_STATE_OFF)
2402 wl1271_info("down");
2404 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2405 wl->scan_wlvif == wlvif) {
2407 * Rearm the tx watchdog just before idling scan. This
2408 * prevents just-finished scans from triggering the watchdog
2410 wl12xx_rearm_tx_watchdog_locked(wl);
2412 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2413 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2414 wl->scan_wlvif = NULL;
2415 wl->scan.req = NULL;
/* true = scan was aborted */
2416 ieee80211_scan_completed(wl->hw, true);
2419 if (wl->sched_vif == wlvif) {
2420 ieee80211_sched_scan_stopped(wl->hw);
2421 wl->sched_vif = NULL;
2424 if (wl->roc_vif == vif) {
2426 ieee80211_remain_on_channel_expired(wl->hw);
/* only talk to the HW when no recovery is in flight */
2429 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2430 /* disable active roles */
2431 ret = wl1271_ps_elp_wakeup(wl);
2435 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2436 wlvif->bss_type == BSS_TYPE_IBSS) {
2437 if (wl12xx_dev_role_started(wlvif))
2438 wl12xx_stop_dev(wl, wlvif);
2441 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2445 wl1271_ps_elp_sleep(wl);
2448 /* clear all hlids (except system_hlid) */
2449 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2451 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2452 wlvif->bss_type == BSS_TYPE_IBSS) {
2453 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2454 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2455 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2456 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2457 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2459 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2460 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2461 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2462 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2463 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2464 wl12xx_free_rate_policy(wl,
2465 &wlvif->ap.ucast_rate_idx[i]);
2466 wl1271_free_ap_keys(wl, wlvif);
2469 dev_kfree_skb(wlvif->probereq);
2470 wlvif->probereq = NULL;
2471 wl12xx_tx_reset_wlvif(wl, wlvif);
2472 if (wl->last_wlvif == wlvif)
2473 wl->last_wlvif = NULL;
2474 list_del(&wlvif->list);
2475 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2476 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2477 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2485 * Last AP, have more stations. Configure sleep auth according to STA.
2486 * Don't do this on unintended recovery.
2488 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2489 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2492 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2493 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2494 /* Configure for power according to debugfs */
2495 if (sta_auth != WL1271_PSM_ILLEGAL)
2496 wl1271_acx_sleep_auth(wl, sta_auth);
2497 /* Configure for ELP power saving */
2499 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex so the vif's work items can be cancelled safely */
2503 mutex_unlock(&wl->mutex);
2505 del_timer_sync(&wlvif->rx_streaming_timer);
2506 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2507 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2508 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2510 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface handler: verify the vif is still tracked
 * in wl->wlvif_list (it may already be gone during a recovery), run
 * the locked teardown, and trigger an intended fw-switch recovery if
 * the reduced vif count calls for the other firmware.
 */
2513 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2514 struct ieee80211_vif *vif)
2516 struct wl1271 *wl = hw->priv;
2517 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2518 struct wl12xx_vif *iter;
2519 struct vif_counter_data vif_count;
2521 wl12xx_get_vif_count(hw, vif, &vif_count);
2522 mutex_lock(&wl->mutex);
2524 if (wl->state == WLCORE_STATE_OFF ||
2525 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2529 * wl->vif can be null here if someone shuts down the interface
2530 * just when hardware recovery has been started.
2532 wl12xx_for_each_wlvif(wl, iter) {
2536 __wl1271_op_remove_interface(wl, vif, true);
2539 WARN_ON(iter != wlvif);
2540 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2541 wl12xx_force_active_psm(wl);
2542 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2543 wl12xx_queue_recovery_work(wl);
2546 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface handler: implemented as remove + re-add
 * with the new type. The VIF_CHANGE_IN_PROGRESS flag suppresses a
 * fw switch in the middle of the transition.
 */
2549 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2550 struct ieee80211_vif *vif,
2551 enum nl80211_iftype new_type, bool p2p)
2553 struct wl1271 *wl = hw->priv;
2556 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2557 wl1271_op_remove_interface(hw, vif);
2559 vif->type = new_type;
2561 ret = wl1271_op_add_interface(hw, vif);
2563 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the FW JOIN for the vif: role_start_ibss for IBSS, otherwise
 * role_start_sta (with a start+stop+start dance on chips that carry
 * the START_STA_FAILS quirk). Note the JOIN side effect of clearing
 * WPA/WPA2 keys documented below.
 */
2567 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2570 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2573 * One of the side effects of the JOIN command is that is clears
2574 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2575 * to a WPA/WPA2 access point will therefore kill the data-path.
2576 * Currently the only valid scenario for JOIN during association
2577 * is on roaming, in which case we will also be given new keys.
2578 * Keep the below message for now, unless it starts bothering
2579 * users who really like to roam a lot :)
2581 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2582 wl1271_info("JOIN while associated.");
2584 /* clear encryption type */
2585 wlvif->encryption_type = KEY_NONE;
2588 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2590 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2592 * TODO: this is an ugly workaround for wl12xx fw
2593 * bug - we are not able to tx/rx after the first
2594 * start_sta, so make dummy start+stop calls,
2595 * and then call start_sta again.
2596 * this should be fixed in the fw.
2598 wl12xx_cmd_role_start_sta(wl, wlvif);
2599 wl12xx_cmd_role_stop_sta(wl, wlvif);
2602 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a frame's IE area (starting at `offset`
 * into the skb) and cache it in wlvif->ssid/ssid_len; errors out if
 * the IE is absent or longer than IEEE80211_MAX_SSID_LEN.
 */
2608 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2612 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2616 wl1271_error("No SSID in IEs!");
2621 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2622 wl1271_error("SSID is too long!");
2626 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id byte + length byte) */
2627 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate wlvif->ssid from the AP probe request mac80211 builds for
 * this vif (STA only — the only supported source of the ssid here).
 */
2631 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2633 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2634 struct sk_buff *skb;
2637 /* we currently only support setting the ssid from the ap probe req */
2638 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2641 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* the IEs start after the fixed probe-request header */
2645 ieoffset = offsetof(struct ieee80211_mgmt,
2646 u.probe_req.variable);
2647 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply post-association FW configuration for a STA vif: cache the
 * AID/beacon interval/QoS settings from bss_conf, build the ps-poll
 * and probe-request templates used for connection maintenance, enable
 * connection monitoring, restart the keep-alive machinery (mode ->
 * aid -> klv null-data -> template config, in that required order),
 * sync the PS mode to mac80211's default (ACTIVE), and update rate
 * policies.
 */
2653 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2654 struct ieee80211_bss_conf *bss_conf,
2660 wlvif->aid = bss_conf->aid;
2661 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2662 wlvif->beacon_int = bss_conf->beacon_int;
2663 wlvif->wmm_enabled = bss_conf->qos;
2665 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2668 * with wl1271, we don't need to update the
2669 * beacon_int and dtim_period, because the firmware
2670 * updates it by itself when the first beacon is
2671 * received after a join.
2673 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2678 * Get a template for hardware connection maintenance
2680 dev_kfree_skb(wlvif->probereq);
2681 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2684 ieoffset = offsetof(struct ieee80211_mgmt,
2685 u.probe_req.variable);
2686 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2688 /* enable the connection monitoring feature */
2689 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2694 * The join command disable the keep-alive mode, shut down its process,
2695 * and also clear the template config, so we need to reset it all after
2696 * the join. The acx_aid starts the keep-alive process, and the order
2697 * of the commands below is relevant.
2699 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2703 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2707 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2711 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2712 wlvif->sta.klv_template_id,
2713 ACX_KEEP_ALIVE_TPL_VALID);
2718 * The default fw psm configuration is AUTO, while mac80211 default
2719 * setting is off (ACTIVE), so sync the fw with the correct value.
2721 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2727 wl1271_tx_enabled_rates_get(wl,
2730 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo the association-time FW configuration when a STA disassociates
 * (or an IBSS leaves): free the probe-request template, disable
 * connection monitoring and keep-alive, abort any in-flight channel
 * switch, invalidate the keep-alive template, and reset the per-vif
 * TX security counters for a clean reconnect.
 */
2738 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2741 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2743 /* make sure we are connected (sta) joined */
2745 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2748 /* make sure we are joined (ibss) */
2750 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2754 /* use defaults when not associated */
2757 /* free probe-request template */
2758 dev_kfree_skb(wlvif->probereq);
2759 wlvif->probereq = NULL;
2761 /* disable connection monitor features */
2762 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2766 /* Disable the keep-alive feature */
2767 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* a disconnect aborts any pending channel switch */
2772 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2773 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2775 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2776 ieee80211_chswitch_done(vif, false);
2777 cancel_delayed_work(&wlvif->channel_switch_work);
2780 /* invalidate keep-alive template */
2781 wl1271_acx_keep_alive_config(wl, wlvif,
2782 wlvif->sta.klv_template_id,
2783 ACX_KEEP_ALIVE_TPL_INVALID);
2785 /* reset TX security counters on a clean disconnect */
2786 wlvif->tx_security_last_seq_lsb = 0;
2787 wlvif->tx_security_seq = 0;
/*
 * Reset the vif's rate sets to the per-band defaults: basic_rate_set is
 * taken from the band's bitrate mask, and rate_set mirrors it.
 */
2792 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2794 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2795 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Apply a mac80211 config change to a single vif. Only the TX power
 * level is handled here: push it to firmware when it differs from the
 * cached value, then update the cache.
 */
2798 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2799 struct ieee80211_conf *conf, u32 changed)
2803 if (conf->power_level != wlvif->power_level) {
2804 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache the new level after the ACX command */
2808 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback: record the global power level, then (with
 * the chip awake and under wl->mutex) forward the change to every
 * active vif via wl12xx_config_vif().
 */
2814 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2816 struct wl1271 *wl = hw->priv;
2817 struct wl12xx_vif *wlvif;
2818 struct ieee80211_conf *conf = &hw->conf;
2821 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2823 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2825 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2828 mutex_lock(&wl->mutex);
2830 if (changed & IEEE80211_CONF_CHANGE_POWER)
2831 wl->power_level = conf->power_level;
/* bail out early if the chip is not fully up */
2833 if (unlikely(wl->state != WLCORE_STATE_ON))
2836 ret = wl1271_ps_elp_wakeup(wl);
2840 /* configure each interface */
2841 wl12xx_for_each_wlvif(wl, wlvif) {
2842 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2848 wl1271_ps_elp_sleep(wl);
2851 mutex_unlock(&wl->mutex);
/*
 * Snapshot of the multicast filter list, built in prepare_multicast()
 * (atomic context) and consumed/freed in configure_filter().
 */
2856 struct wl1271_filter_params {
2859 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback: copy the hw multicast address
 * list into a freshly allocated wl1271_filter_params (GFP_ATOMIC - may
 * run in atomic context) and return it squeezed into the u64 cookie
 * that mac80211 later passes to configure_filter().  If the list is
 * larger than the firmware table, filtering is disabled instead.
 */
2862 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2863 struct netdev_hw_addr_list *mc_list)
2865 struct wl1271_filter_params *fp;
2866 struct netdev_hw_addr *ha;
2868 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2870 wl1271_error("Out of memory setting filters.");
2874 /* update multicast filtering parameters */
2875 fp->mc_list_length = 0;
2876 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2877 fp->enabled = false;
2880 netdev_hw_addr_list_for_each(ha, mc_list) {
2881 memcpy(fp->mc_list[fp->mc_list_length],
2882 ha->addr, ETH_ALEN);
2883 fp->mc_list_length++;
/* ownership of fp transfers to configure_filter() via this cookie */
2887 return (u64)(unsigned long)fp;
/* Subset of mac80211 FIF_* filter flags this driver can honour. */
2890 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2893 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback: mask the requested flags down to
 * what the hardware supports, then program the multicast group address
 * table per non-AP vif (empty table when FIF_ALLMULTI is requested).
 * The fp cookie allocated by prepare_multicast() is consumed here.
 */
2897 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2898 unsigned int changed,
2899 unsigned int *total, u64 multicast)
2901 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2902 struct wl1271 *wl = hw->priv;
2903 struct wl12xx_vif *wlvif;
2907 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2908 " total %x", changed, *total);
2910 mutex_lock(&wl->mutex);
2912 *total &= WL1271_SUPPORTED_FILTERS;
2913 changed &= WL1271_SUPPORTED_FILTERS;
2915 if (unlikely(wl->state != WLCORE_STATE_ON))
2918 ret = wl1271_ps_elp_wakeup(wl);
2922 wl12xx_for_each_wlvif(wl, wlvif) {
2923 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2924 if (*total & FIF_ALLMULTI)
2925 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2929 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2932 fp->mc_list_length);
2939 * the fw doesn't provide an api to configure the filters. instead,
2940 * the filters configuration is based on the active roles / ROC
2945 wl1271_ps_elp_sleep(wl);
2948 mutex_unlock(&wl->mutex);
/*
 * Record an AP key for later upload: keys set before the AP role is
 * started cannot be pushed to firmware yet, so they are stashed in
 * wlvif->ap.recorded_keys and replayed by wl1271_ap_init_hwenc().
 * Rejects oversized keys and replacement of an already-recorded id.
 */
2952 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2953 u8 id, u8 key_type, u8 key_size,
2954 const u8 *key, u8 hlid, u32 tx_seq_32,
2957 struct wl1271_ap_key *ap_key;
2960 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2962 if (key_size > MAX_KEY_SIZE)
2966 * Find next free entry in ap_keys. Also check we are not replacing
2969 for (i = 0; i < MAX_NUM_KEYS; i++) {
2970 if (wlvif->ap.recorded_keys[i] == NULL)
2973 if (wlvif->ap.recorded_keys[i]->id == id) {
2974 wl1271_warning("trying to record key replacement");
/* all slots occupied: nowhere to record the key */
2979 if (i == MAX_NUM_KEYS)
2982 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2987 ap_key->key_type = key_type;
2988 ap_key->key_size = key_size;
2989 memcpy(ap_key->key, key, key_size);
2990 ap_key->hlid = hlid;
2991 ap_key->tx_seq_32 = tx_seq_32;
2992 ap_key->tx_seq_16 = tx_seq_16;
2994 wlvif->ap.recorded_keys[i] = ap_key;
/* Free every recorded (not yet uploaded) AP key and clear the slots. */
2998 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3002 for (i = 0; i < MAX_NUM_KEYS; i++) {
3003 kfree(wlvif->ap.recorded_keys[i]);
3004 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Upload the keys recorded before AP start to the firmware.  Keys with
 * an invalid hlid are treated as group keys and bound to the broadcast
 * hlid.  If any WEP key was uploaded, the default WEP key index is also
 * programmed.  All recorded keys are freed on the way out.
 */
3008 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3011 struct wl1271_ap_key *key;
3012 bool wep_key_added = false;
3014 for (i = 0; i < MAX_NUM_KEYS; i++) {
3016 if (wlvif->ap.recorded_keys[i] == NULL)
3019 key = wlvif->ap.recorded_keys[i];
/* group keys were recorded with an invalid hlid; use broadcast */
3021 if (hlid == WL12XX_INVALID_LINK_ID)
3022 hlid = wlvif->ap.bcast_hlid;
3024 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3025 key->id, key->key_type,
3026 key->key_size, key->key,
3027 hlid, key->tx_seq_32,
3032 if (key->key_type == KEY_WEP)
3033 wep_key_added = true;
3036 if (wep_key_added) {
3037 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3038 wlvif->ap.bcast_hlid);
3044 wl1271_free_ap_keys(wl, wlvif);
/*
 * Common key plumbing for both AP and STA vifs.
 *
 * AP path: before the AP role is started, keys are only recorded (and
 * removals are silently accepted); afterwards they go straight to fw
 * via the set_ap_key command.  Per-station keys use the station hlid,
 * group keys the broadcast hlid.
 *
 * STA path: keys are addressed either to the peer station or to the
 * broadcast address.  Unicast key removal is silently ignored (the fw
 * clears those on the next CMD_JOIN), as is removal after the sta hlid
 * was already torn down.  A WEP key additionally (re)programs the
 * default WEP key index.
 */
3048 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3049 u16 action, u8 id, u8 key_type,
3050 u8 key_size, const u8 *key, u32 tx_seq_32,
3051 u16 tx_seq_16, struct ieee80211_sta *sta)
3054 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3057 struct wl1271_station *wl_sta;
3061 wl_sta = (struct wl1271_station *)sta->drv_priv;
3062 hlid = wl_sta->hlid;
3064 hlid = wlvif->ap.bcast_hlid;
3067 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3069 * We do not support removing keys after AP shutdown.
3070 * Pretend we do to make mac80211 happy.
3072 if (action != KEY_ADD_OR_REPLACE)
3075 ret = wl1271_record_ap_key(wl, wlvif, id,
3077 key, hlid, tx_seq_32,
3080 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3081 id, key_type, key_size,
3082 key, hlid, tx_seq_32,
3090 static const u8 bcast_addr[ETH_ALEN] = {
3091 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3094 addr = sta ? sta->addr : bcast_addr;
3096 if (is_zero_ether_addr(addr)) {
3097 /* We dont support TX only encryption */
3101 /* The wl1271 does not allow to remove unicast keys - they
3102 will be cleared automatically on next CMD_JOIN. Ignore the
3103 request silently, as we dont want the mac80211 to emit
3104 an error message. */
3105 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3108 /* don't remove key if hlid was already deleted */
3109 if (action == KEY_REMOVE &&
3110 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3113 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3114 id, key_type, key_size,
3115 key, addr, tx_seq_32,
3120 /* the default WEP key needs to be configured at least once */
3121 if (key_type == KEY_WEP) {
3122 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * mac80211 .set_key callback.  GEM and TKIP keys can change the number
 * of spare TX blocks the fw accounts per packet, so for those ciphers
 * the TX queues are stopped and flushed first to keep the accounting in
 * sync, and woken again after the hw-specific set_key op completed.
 */
3133 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3134 struct ieee80211_vif *vif,
3135 struct ieee80211_sta *sta,
3136 struct ieee80211_key_conf *key_conf)
3138 struct wl1271 *wl = hw->priv;
3140 bool might_change_spare =
3141 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3142 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3144 if (might_change_spare) {
3146 * stop the queues and flush to ensure the next packets are
3147 * in sync with FW spare block accounting
3149 mutex_lock(&wl->mutex);
3150 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3151 mutex_unlock(&wl->mutex);
/* flush outside the mutex - tx work also takes it */
3153 wl1271_tx_flush(wl);
3156 mutex_lock(&wl->mutex);
3158 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3160 goto out_wake_queues;
3163 ret = wl1271_ps_elp_wakeup(wl);
3165 goto out_wake_queues;
3167 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3169 wl1271_ps_elp_sleep(wl);
3172 if (might_change_spare)
3173 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3175 mutex_unlock(&wl->mutex);
/*
 * Shared implementation behind the hw-specific set_key ops.  Maps the
 * mac80211 cipher suite to the fw key type, derives the TX sequence
 * counters for TKIP/CCMP/GEM, then adds or removes the key via
 * wl1271_set_key().  On a unicast (or WEP) key change for a STA vif the
 * ARP response template is rebuilt, since its encryption padding
 * depends on the key type.
 */
3180 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3181 struct ieee80211_vif *vif,
3182 struct ieee80211_sta *sta,
3183 struct ieee80211_key_conf *key_conf)
3185 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3191 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3193 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3194 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3195 key_conf->cipher, key_conf->keyidx,
3196 key_conf->keylen, key_conf->flags);
3197 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3199 switch (key_conf->cipher) {
3200 case WLAN_CIPHER_SUITE_WEP40:
3201 case WLAN_CIPHER_SUITE_WEP104:
3204 key_conf->hw_key_idx = key_conf->keyidx;
3206 case WLAN_CIPHER_SUITE_TKIP:
3207 key_type = KEY_TKIP;
3209 key_conf->hw_key_idx = key_conf->keyidx;
3210 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3211 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3213 case WLAN_CIPHER_SUITE_CCMP:
3216 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3217 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3218 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3220 case WL1271_CIPHER_SUITE_GEM:
3222 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3223 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3226 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3233 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3234 key_conf->keyidx, key_type,
3235 key_conf->keylen, key_conf->key,
3236 tx_seq_32, tx_seq_16, sta);
3238 wl1271_error("Could not add or replace key");
3243 * reconfiguring arp response if the unicast (or common)
3244 * encryption key type was changed
3246 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3247 (sta || key_type == KEY_WEP) &&
3248 wlvif->encryption_type != key_type) {
3249 wlvif->encryption_type = key_type;
3250 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3252 wl1271_warning("build arp rsp failed: %d", ret);
3259 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3260 key_conf->keyidx, key_type,
3261 key_conf->keylen, key_conf->key,
3264 wl1271_error("Could not remove key");
3270 wl1271_error("Unsupported key cmd 0x%x", cmd);
3276 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * Push the current regulatory domain configuration to firmware, for
 * chips with the REGDOMAIN_CONF quirk.  On command failure a recovery
 * is queued instead of returning an error.
 */
3278 void wlcore_regdomain_config(struct wl1271 *wl)
3282 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3285 mutex_lock(&wl->mutex);
3286 ret = wl1271_ps_elp_wakeup(wl);
3290 ret = wlcore_cmd_regdomain_config_locked(wl);
3292 wl12xx_queue_recovery_work(wl);
3296 wl1271_ps_elp_sleep(wl);
3298 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan callback: start a firmware scan with the first
 * requested SSID.  Scanning is refused while any role is in a
 * remain-on-channel state.  Note the comment below: -EBUSY cannot be
 * returned from the off-state path because cfg80211 would then wait for
 * a scan-completed notification that will never arrive.
 */
3301 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3302 struct ieee80211_vif *vif,
3303 struct cfg80211_scan_request *req)
3305 struct wl1271 *wl = hw->priv;
3310 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3313 ssid = req->ssids[0].ssid;
3314 len = req->ssids[0].ssid_len;
3317 mutex_lock(&wl->mutex);
3319 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3321 * We cannot return -EBUSY here because cfg80211 will expect
3322 * a call to ieee80211_scan_completed if we do - in this case
3323 * there won't be any call.
3329 ret = wl1271_ps_elp_wakeup(wl);
3333 /* fail if there is any role in ROC */
3334 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3335 /* don't allow scanning right now */
3340 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3342 wl1271_ps_elp_sleep(wl);
3344 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback: stop an in-progress firmware scan,
 * rearm the TX watchdog, reset all scan bookkeeping and notify mac80211
 * that the scan was aborted.  The scan-complete work is cancelled last,
 * outside the mutex, since it takes wl->mutex itself.
 */
3349 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3350 struct ieee80211_vif *vif)
3352 struct wl1271 *wl = hw->priv;
3353 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3356 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3358 mutex_lock(&wl->mutex);
3360 if (unlikely(wl->state != WLCORE_STATE_ON))
3363 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3366 ret = wl1271_ps_elp_wakeup(wl);
3370 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3371 ret = wl->ops->scan_stop(wl, wlvif);
3377 * Rearm the tx watchdog just before idling scan. This
3378 * prevents just-finished scans from triggering the watchdog
3380 wl12xx_rearm_tx_watchdog_locked(wl);
3382 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3383 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3384 wl->scan_wlvif = NULL;
3385 wl->scan.req = NULL;
/* report the scan as aborted to mac80211 */
3386 ieee80211_scan_completed(wl->hw, true);
3389 wl1271_ps_elp_sleep(wl);
3391 mutex_unlock(&wl->mutex);
3393 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: hand the scheduled-scan request
 * to the hw-specific op and remember which vif owns the sched scan.
 */
3396 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3397 struct ieee80211_vif *vif,
3398 struct cfg80211_sched_scan_request *req,
3399 struct ieee80211_sched_scan_ies *ies)
3401 struct wl1271 *wl = hw->priv;
3402 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3405 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3407 mutex_lock(&wl->mutex);
3409 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3414 ret = wl1271_ps_elp_wakeup(wl);
3418 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* record the owning vif so it can be stopped on disconnect */
3422 wl->sched_vif = wlvif;
3425 wl1271_ps_elp_sleep(wl);
3427 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: stop the scheduled scan via the
 * hw-specific op, with the chip awake and under wl->mutex.
 */
3431 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3432 struct ieee80211_vif *vif)
3434 struct wl1271 *wl = hw->priv;
3435 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3438 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3440 mutex_lock(&wl->mutex);
3442 if (unlikely(wl->state != WLCORE_STATE_ON))
3445 ret = wl1271_ps_elp_wakeup(wl);
3449 wl->ops->sched_scan_stop(wl, wlvif);
3451 wl1271_ps_elp_sleep(wl);
3453 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: program the global
 * fragmentation threshold via an ACX command.
 */
3456 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3458 struct wl1271 *wl = hw->priv;
3461 mutex_lock(&wl->mutex);
3463 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3468 ret = wl1271_ps_elp_wakeup(wl);
3472 ret = wl1271_acx_frag_threshold(wl, value);
3474 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3476 wl1271_ps_elp_sleep(wl);
3479 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: program the RTS threshold on
 * every active vif (the setting is per-role in this firmware).
 */
3484 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3486 struct wl1271 *wl = hw->priv;
3487 struct wl12xx_vif *wlvif;
3490 mutex_lock(&wl->mutex);
3492 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3497 ret = wl1271_ps_elp_wakeup(wl);
3501 wl12xx_for_each_wlvif(wl, wlvif) {
3502 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3504 wl1271_warning("set rts threshold failed: %d", ret);
3506 wl1271_ps_elp_sleep(wl);
3509 mutex_unlock(&wl->mutex);
/*
 * Remove the first information element with the given EID from an skb,
 * searching from ieoffset onward.  The remainder of the frame is moved
 * down over the IE and the skb is trimmed accordingly.
 */
3514 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3517 const u8 *next, *end = skb->data + skb->len;
3518 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3519 skb->len - ieoffset);
3524 memmove(ie, next, end - next);
3525 skb_trim(skb, skb->len - len);
/*
 * Remove a vendor-specific IE (matched by OUI and OUI type) from an
 * skb, same compaction scheme as wl12xx_remove_ie() above.
 */
3528 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3529 unsigned int oui, u8 oui_type,
3533 const u8 *next, *end = skb->data + skb->len;
3534 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3535 skb->data + ieoffset,
3536 skb->len - ieoffset);
3541 memmove(ie, next, end - next);
3542 skb_trim(skb, skb->len - len);
/*
 * Upload the probe-response template supplied by usermode (via
 * ieee80211_proberesp_get) for an AP vif, and mark the vif so the
 * beacon-derived probe response is no longer used.
 */
3545 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3546 struct ieee80211_vif *vif)
3548 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3549 struct sk_buff *skb;
3552 skb = ieee80211_proberesp_get(wl->hw, vif);
3556 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3557 CMD_TEMPL_AP_PROBE_RESPONSE,
3566 wl1271_debug(DEBUG_AP, "probe response updated");
3567 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Build an AP probe-response template from a beacon-derived frame.
 * If the vif already has a correct SSID the frame is uploaded as-is;
 * otherwise the SSID IE in the frame is replaced with the SSID from
 * bss_conf (splicing head + new SSID IE + tail into a stack buffer)
 * before uploading.
 */
3573 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3574 struct ieee80211_vif *vif,
3576 size_t probe_rsp_len,
3579 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3580 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3581 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3582 int ssid_ie_offset, ie_offset, templ_len;
3585 /* no need to change probe response if the SSID is set correctly */
3586 if (wlvif->ssid_len > 0)
3587 return wl1271_cmd_template_set(wl, wlvif->role_id,
3588 CMD_TEMPL_AP_PROBE_RESPONSE,
3593 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3594 wl1271_error("probe_rsp template too big");
3598 /* start searching from IE offset */
3599 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3601 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3602 probe_rsp_len - ie_offset);
3604 wl1271_error("No SSID in beacon!");
3608 ssid_ie_offset = ptr - probe_rsp_data;
/* skip over the old SSID IE (2-byte header + payload) */
3609 ptr += (ptr[1] + 2);
3611 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3613 /* insert SSID from bss_conf */
3614 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3615 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3616 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3617 bss_conf->ssid, bss_conf->ssid_len);
3618 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3620 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3621 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3622 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3624 return wl1271_cmd_template_set(wl, wlvif->role_id,
3625 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to firmware: slot time, preamble
 * type and CTS protection.  Failures are logged as warnings.
 */
3631 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3632 struct ieee80211_vif *vif,
3633 struct ieee80211_bss_conf *bss_conf,
3636 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3639 if (changed & BSS_CHANGED_ERP_SLOT) {
3640 if (bss_conf->use_short_slot)
3641 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3643 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3645 wl1271_warning("Set slot time failed %d", ret);
3650 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3651 if (bss_conf->use_short_preamble)
3652 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3654 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3657 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3658 if (bss_conf->use_cts_prot)
3659 ret = wl1271_acx_cts_protect(wl, wlvif,
3662 ret = wl1271_acx_cts_protect(wl, wlvif,
3663 CTSPROTECT_DISABLE);
3665 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211, upload it as the beacon
 * template, and derive a probe-response template from it: the TIM and
 * P2P IEs are stripped, the frame control is rewritten to PROBE_RESP,
 * and the result is uploaded (via the legacy SSID-fixup helper for AP
 * vifs).  Skipped when usermode already set an explicit probe response.
 * Also updates wlvif->wmm_enabled from the beacon's WMM vendor IE.
 */
3674 static int wlcore_set_beacon_template(struct wl1271 *wl,
3675 struct ieee80211_vif *vif,
3678 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3679 struct ieee80211_hdr *hdr;
3682 int ieoffset = offsetof(struct ieee80211_mgmt,
3684 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3692 wl1271_debug(DEBUG_MASTER, "beacon updated");
3694 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3696 dev_kfree_skb(beacon);
3699 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3700 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3702 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3707 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the WMM vendor IE */
3711 wlvif->wmm_enabled =
3712 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3713 WLAN_OUI_TYPE_MICROSOFT_WMM,
3714 beacon->data + ieoffset,
3715 beacon->len - ieoffset);
3718 * In case we already have a probe-resp beacon set explicitly
3719 * by usermode, don't use the beacon data.
3721 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3724 /* remove TIM ie from probe response */
3725 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3728 * remove p2p ie from probe response.
3729 * the fw reponds to probe requests that don't include
3730 * the p2p ie. probe requests with p2p ie will be passed,
3731 * and will be responded by the supplicant (the spec
3732 * forbids including the p2p ie when responding to probe
3733 * requests that didn't include it).
3735 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3736 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3738 hdr = (struct ieee80211_hdr *) beacon->data;
3739 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3740 IEEE80211_STYPE_PROBE_RESP);
3742 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3747 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3748 CMD_TEMPL_PROBE_RESPONSE,
3753 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the explicit AP probe-response template, and re-upload the
 * beacon template when the beacon content changed.
 */
3761 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3762 struct ieee80211_vif *vif,
3763 struct ieee80211_bss_conf *bss_conf,
3766 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3767 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3770 if (changed & BSS_CHANGED_BEACON_INT) {
3771 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3772 bss_conf->beacon_int);
3774 wlvif->beacon_int = bss_conf->beacon_int;
3777 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3778 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3780 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3783 if (changed & BSS_CHANGED_BEACON) {
3784 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3791 wl1271_error("beacon info change failed: %d", ret);
/*
 * AP-mode bss_info change handler: updates basic rates (and dependent
 * rate policies and templates), starts/stops the AP role when
 * beaconing is enabled/disabled, forwards ERP changes and pushes HT
 * operation-mode updates to firmware.
 */
3795 /* AP mode changes */
3796 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3797 struct ieee80211_vif *vif,
3798 struct ieee80211_bss_conf *bss_conf,
3801 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3804 if (changed & BSS_CHANGED_BASIC_RATES) {
3805 u32 rates = bss_conf->basic_rates;
3807 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3809 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3810 wlvif->basic_rate_set);
3812 ret = wl1271_init_ap_rates(wl, wlvif);
3814 wl1271_error("AP rate policy change failed %d", ret);
/* templates depend on the rate set, so rebuild them */
3818 ret = wl1271_ap_init_templates(wl, vif);
3822 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3826 ret = wlcore_set_beacon_template(wl, vif, true);
3831 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3835 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3836 if (bss_conf->enable_beacon) {
3837 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3838 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* upload keys recorded before the AP role existed */
3842 ret = wl1271_ap_init_hwenc(wl, wlvif);
3846 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3847 wl1271_debug(DEBUG_AP, "started AP");
3850 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3851 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3855 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3856 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3858 wl1271_debug(DEBUG_AP, "stopped AP");
3863 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3867 /* Handle HT information change */
3868 if ((changed & BSS_CHANGED_HT) &&
3869 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3870 ret = wl1271_acx_set_ht_information(wl, wlvif,
3871 bss_conf->ht_operation_mode);
3873 wl1271_warning("Set ht information failed %d", ret);
/*
 * Configure a new BSSID on a STA vif: cache the beacon interval, derive
 * basic/enabled rate sets from the AP's rates, stop any scheduled scan
 * owned by this vif (sched_scan is only supported while disconnected),
 * push rate policies and null-data templates, set the SSID and mark the
 * vif in use.
 */
3882 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3883 struct ieee80211_bss_conf *bss_conf,
3889 wl1271_debug(DEBUG_MAC80211,
3890 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3891 bss_conf->bssid, bss_conf->aid,
3892 bss_conf->beacon_int,
3893 bss_conf->basic_rates, sta_rate_set);
3895 wlvif->beacon_int = bss_conf->beacon_int;
3896 rates = bss_conf->basic_rates;
3897 wlvif->basic_rate_set =
3898 wl1271_tx_enabled_rates_get(wl, rates,
3901 wl1271_tx_min_rate_get(wl,
3902 wlvif->basic_rate_set);
3906 wl1271_tx_enabled_rates_get(wl,
3910 /* we only support sched_scan while not connected */
3911 if (wl->sched_vif == wlvif)
3912 wl->ops->sched_scan_stop(wl, wlvif);
3914 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3918 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3922 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
3926 wlcore_set_ssid(wl, wlvif);
3928 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Clear the BSSID association: revert to the band's minimum rates,
 * re-apply rate policies, stop the STA role if it was running, and mark
 * the vif as no longer in use.
 */
3933 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3937 /* revert back to minimum rates for the current band */
3938 wl1271_set_band_rate(wl, wlvif);
3939 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3941 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3945 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3946 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
3947 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
3952 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * STA/IBSS bss_info change handler.  Processes, in order: beacon info,
 * IBSS join/leave, CQM RSSI thresholds, peer-station capabilities
 * lookup (supp_rates / HT caps, under RCU in the full source), BSSID
 * set/clear, IBSS rate setup, ERP changes, the actual join, association
 * state, power-save transitions, HT capability/information updates and
 * finally ARP filtering.  Control-flow lines between the visible
 * statements are elided in this view — consult the full source before
 * modifying.
 */
3955 /* STA/IBSS mode changes */
3956 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3957 struct ieee80211_vif *vif,
3958 struct ieee80211_bss_conf *bss_conf,
3961 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3962 bool do_join = false;
3963 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3964 bool ibss_joined = false;
3965 u32 sta_rate_set = 0;
3967 struct ieee80211_sta *sta;
3968 bool sta_exists = false;
3969 struct ieee80211_sta_ht_cap sta_ht_cap;
3972 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3978 if (changed & BSS_CHANGED_IBSS) {
3979 if (bss_conf->ibss_joined) {
3980 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)
3983 wlcore_unset_assoc(wl, wlvif);
3984 wl12xx_cmd_role_stop_sta(wl, wlvif);
3988 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3991 /* Need to update the SSID (for filtering etc) */
3992 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3995 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3996 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3997 bss_conf->enable_beacon ? "enabled" : "disabled");
4002 if (changed & BSS_CHANGED_CQM) {
4003 bool enable = false;
4004 if (bss_conf->cqm_rssi_thold)
4006 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4007 bss_conf->cqm_rssi_thold,
4008 bss_conf->cqm_rssi_hyst);
4011 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4014 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4015 BSS_CHANGED_ASSOC)) {
4017 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4019 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4021 /* save the supp_rates of the ap */
4022 sta_rate_set = sta->supp_rates[wlvif->band];
4023 if (sta->ht_cap.ht_supported)
4025 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4026 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4027 sta_ht_cap = sta->ht_cap;
4034 if (changed & BSS_CHANGED_BSSID) {
4035 if (!is_zero_ether_addr(bss_conf->bssid)) {
4036 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4041 /* Need to update the BSSID (for filtering etc) */
4044 ret = wlcore_clear_bssid(wl, wlvif);
4050 if (changed & BSS_CHANGED_IBSS) {
4051 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4052 bss_conf->ibss_joined);
4054 if (bss_conf->ibss_joined) {
4055 u32 rates = bss_conf->basic_rates;
4056 wlvif->basic_rate_set =
4057 wl1271_tx_enabled_rates_get(wl, rates,
4060 wl1271_tx_min_rate_get(wl,
4061 wlvif->basic_rate_set);
4063 /* by default, use 11b + OFDM rates */
4064 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4065 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4071 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4076 ret = wlcore_join(wl, wlvif);
4078 wl1271_warning("cmd join failed %d", ret);
4083 if (changed & BSS_CHANGED_ASSOC) {
4084 if (bss_conf->assoc) {
4085 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* if the peer was authorized before assoc completed, replay it */
4090 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4091 wl12xx_set_authorized(wl, wlvif);
4093 wlcore_unset_assoc(wl, wlvif);
4097 if (changed & BSS_CHANGED_PS) {
4098 if ((bss_conf->ps) &&
4099 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4100 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4104 if (wl->conf.conn.forced_ps) {
4105 ps_mode = STATION_POWER_SAVE_MODE;
4106 ps_mode_str = "forced";
4108 ps_mode = STATION_AUTO_PS_MODE;
4109 ps_mode_str = "auto";
4112 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4114 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4116 wl1271_warning("enter %s ps failed %d",
4118 } else if (!bss_conf->ps &&
4119 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4120 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4122 ret = wl1271_ps_set_mode(wl, wlvif,
4123 STATION_ACTIVE_MODE);
4125 wl1271_warning("exit auto ps failed %d", ret);
4129 /* Handle new association with HT. Do this after join. */
4131 (changed & BSS_CHANGED_HT)) {
4133 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4135 ret = wl1271_acx_set_ht_capabilities(wl,
4140 wl1271_warning("Set ht cap failed %d", ret);
4146 ret = wl1271_acx_set_ht_information(wl, wlvif,
4147 bss_conf->ht_operation_mode);
4149 wl1271_warning("Set ht information failed %d",
4156 /* Handle arp filtering. Done after join. */
4157 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4158 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4159 __be32 addr = bss_conf->arp_addr_list[0];
4160 wlvif->sta.qos = bss_conf->qos;
4161 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4163 if (bss_conf->arp_addr_cnt == 1 &&
4164 bss_conf->arp_filter_enabled) {
4165 wlvif->ip_addr = addr;
4167 * The template should have been configured only upon
4168 * association. however, it seems that the correct ip
4169 * isn't being set (when sending), so we have to
4170 * reconfigure the template upon every ip change.
4172 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4174 wl1271_warning("build arp rsp failed: %d", ret);
4178 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4179 (ACX_ARP_FILTER_ARP_FILTERING |
4180 ACX_ARP_FILTER_AUTO_ARP),
4184 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback: cancel a pending connection-loss
 * work on STA association changes, flush TX before an AP stops
 * beaconing, then dispatch to the AP or STA/IBSS handler with the chip
 * awake and wl->mutex held.
 */
4195 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4196 struct ieee80211_vif *vif,
4197 struct ieee80211_bss_conf *bss_conf,
4200 struct wl1271 *wl = hw->priv;
4201 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4202 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4205 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4206 wlvif->role_id, (int)changed);
4209 * make sure to cancel pending disconnections if our association
4212 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4213 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4215 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4216 !bss_conf->enable_beacon)
4217 wl1271_tx_flush(wl);
4219 mutex_lock(&wl->mutex);
4221 if (unlikely(wl->state != WLCORE_STATE_ON))
4224 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4227 ret = wl1271_ps_elp_wakeup(wl);
4232 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4234 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4236 wl1271_ps_elp_sleep(wl);
4239 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback: the firmware needs no per-context
 * setup here, so this only logs the new channel context.
 */
4242 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4243 struct ieee80211_chanctx_conf *ctx)
4245 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4246 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4247 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 .remove_chanctx callback: log only, no fw work needed. */
4251 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4252 struct ieee80211_chanctx_conf *ctx)
4254 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4255 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4256 cfg80211_get_chandef_type(&ctx->def));
/* mac80211 .change_chanctx callback: log only, no fw work needed. */
4259 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4260 struct ieee80211_chanctx_conf *ctx,
4263 wl1271_debug(DEBUG_MAC80211,
4264 "mac80211 change chanctx %d (type %d) changed 0x%x",
4265 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4266 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 .assign_vif_chanctx callback: cache the context's band,
 * channel and channel type on the vif, and reset the vif's rate sets
 * to the new band's defaults.
 */
4269 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4270 struct ieee80211_vif *vif,
4271 struct ieee80211_chanctx_conf *ctx)
4273 struct wl1271 *wl = hw->priv;
4274 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4275 int channel = ieee80211_frequency_to_channel(
4276 ctx->def.chan->center_freq);
4278 wl1271_debug(DEBUG_MAC80211,
4279 "mac80211 assign chanctx (role %d) %d (type %d)",
4280 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4282 mutex_lock(&wl->mutex);
4284 wlvif->band = ctx->def.chan->band;
4285 wlvif->channel = channel;
4286 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4288 /* update default rates according to the band */
4289 wl1271_set_band_rate(wl, wlvif);
4291 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx: flush pending TX so no frames are left
 * targeting the channel that is being released from this vif.
 */
4296 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4297 struct ieee80211_vif *vif,
4298 struct ieee80211_chanctx_conf *ctx)
4300 struct wl1271 *wl = hw->priv;
4301 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4303 wl1271_debug(DEBUG_MAC80211,
4304 "mac80211 unassign chanctx (role %d) %d (type %d)",
4306 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4307 cfg80211_get_chandef_type(&ctx->def));
4309 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program WMM/EDCA parameters (cw_min/max,
 * AIFS, TXOP) and the per-TID power-save scheme for one AC queue into
 * the firmware.  Runs under wl->mutex with an ELP wakeup around the
 * ACX commands.
 */
4312 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4313 struct ieee80211_vif *vif, u16 queue,
4314 const struct ieee80211_tx_queue_params *params)
4316 struct wl1271 *wl = hw->priv;
4317 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4321 mutex_lock(&wl->mutex);
4323 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4326 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4328 ps_scheme = CONF_PS_SCHEME_LEGACY;
4330 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4333 ret = wl1271_ps_elp_wakeup(wl);
/* mac80211 reports txop in units of 32us; firmware wants microseconds,
 * hence the << 5 below. */
4338 * the txop is confed in units of 32us by the mac80211,
4341 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4342 params->cw_min, params->cw_max,
4343 params->aifs, params->txop << 5);
4347 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4348 CONF_CHANNEL_TYPE_EDCF,
4349 wl1271_tx_get_queue(queue),
4350 ps_scheme, CONF_ACK_POLICY_LEGACY,
4354 wl1271_ps_elp_sleep(wl);
4357 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the current TSF (mactime) from the
 * firmware via an ACX query.  mactime stays ULLONG_MAX if the read
 * cannot be performed (e.g. device not in the ON state).
 */
4362 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4363 struct ieee80211_vif *vif)
4366 struct wl1271 *wl = hw->priv;
4367 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4368 u64 mactime = ULLONG_MAX;
4371 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4373 mutex_lock(&wl->mutex);
4375 if (unlikely(wl->state != WLCORE_STATE_ON))
4378 ret = wl1271_ps_elp_wakeup(wl);
4382 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4387 wl1271_ps_elp_sleep(wl);
4390 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the currently configured
 * channel; no noise/usage statistics are collected by this driver.
 */
4394 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4395 struct survey_info *survey)
4397 struct ieee80211_conf *conf = &hw->conf;
4402 survey->channel = conf->channel;
/*
 * Allocate a firmware link (HLID) for a newly connecting station in AP
 * mode: enforce the AP_MAX_STATIONS limit, grab a free link, mark it in
 * the vif's sta_hlid_map, record the station's MAC and bump the active
 * station count.  The HLID is stored in the station's drv_priv.
 */
4407 static int wl1271_allocate_sta(struct wl1271 *wl,
4408 struct wl12xx_vif *wlvif,
4409 struct ieee80211_sta *sta)
4411 struct wl1271_station *wl_sta;
4415 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4416 wl1271_warning("could not allocate HLID - too much stations");
4420 wl_sta = (struct wl1271_station *)sta->drv_priv;
4421 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4423 wl1271_warning("could not allocate HLID - too many links");
4427 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4428 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4429 wl->active_sta_count++;
/*
 * Release a station's HLID (AP mode): clear it from the vif map and the
 * host/firmware power-save maps, free the link and decrement the active
 * station count.  No-op if the HLID was not allocated on this vif.
 */
4433 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4435 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4438 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4439 __clear_bit(hlid, &wl->ap_ps_map);
4440 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4441 wl12xx_free_link(wl, wlvif, &hlid);
4442 wl->active_sta_count--;
4445 * rearm the tx watchdog when the last STA is freed - give the FW a
4446 * chance to return STA-buffered packets before complaining.
4448 if (wl->active_sta_count == 0)
4449 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate an HLID, then tell the firmware
 * about the new peer.  On ADD_PEER failure the HLID is freed again so
 * no link leaks.
 */
4452 static int wl12xx_sta_add(struct wl1271 *wl,
4453 struct wl12xx_vif *wlvif,
4454 struct ieee80211_sta *sta)
4456 struct wl1271_station *wl_sta;
4460 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4462 ret = wl1271_allocate_sta(wl, wlvif, sta);
4466 wl_sta = (struct wl1271_station *)sta->drv_priv;
4467 hlid = wl_sta->hlid;
4469 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4471 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station in AP mode: sanity-check that the HLID is actually
 * allocated, send REMOVE_PEER to the firmware, then free the link.
 */
4476 static int wl12xx_sta_remove(struct wl1271 *wl,
4477 struct wl12xx_vif *wlvif,
4478 struct ieee80211_sta *sta)
4480 struct wl1271_station *wl_sta;
4483 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4485 wl_sta = (struct wl1271_station *)sta->drv_priv;
4487 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4490 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4494 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no other
 * role currently holds a ROC (roc_map empty) and the vif has a valid
 * role id.
 */
4498 static void wlcore_roc_if_possible(struct wl1271 *wl,
4499 struct wl12xx_vif *wlvif)
4501 if (find_first_bit(wl->roc_map,
4502 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4505 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4508 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection on this vif.  The first
 * in-connection station triggers a ROC (so the AP stays on-channel for
 * the handshake); when the last one finishes, any ROC held by this
 * vif's role is cancelled.  The per-station in_connection flag guards
 * against double counting.
 */
4511 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4512 struct wl12xx_vif *wlvif,
4513 struct wl1271_station *wl_sta,
4516 if (in_connection) {
4517 if (WARN_ON(wl_sta->in_connection))
4519 wl_sta->in_connection = true;
4520 if (!wlvif->inconn_count++)
4521 wlcore_roc_if_possible(wl, wlvif);
4523 if (!wl_sta->in_connection)
4526 wl_sta->in_connection = false;
4527 wlvif->inconn_count--;
4528 if (WARN_ON(wlvif->inconn_count < 0))
4531 if (!wlvif->inconn_count)
4532 if (test_bit(wlvif->role_id, wl->roc_map))
4533 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state machine: react to mac80211 station state
 * transitions for both AP and STA roles.
 *
 * AP role: add/remove the peer on NOTEXIST<->NONE, set the peer state
 * and HT capabilities on AUTHORIZED, and keep the in-connection
 * bookkeeping (ROC/CROC) in sync.
 * STA role: mark/unmark the vif authorized on the
 * ASSOC<->AUTHORIZED transitions, clear stale ROCs on failure or on
 * authorization, and ROC on the first NOTEXIST->NONE (auth/assoc phase)
 * when no other role holds one.
 */
4537 static int wl12xx_update_sta_state(struct wl1271 *wl,
4538 struct wl12xx_vif *wlvif,
4539 struct ieee80211_sta *sta,
4540 enum ieee80211_sta_state old_state,
4541 enum ieee80211_sta_state new_state)
4543 struct wl1271_station *wl_sta;
4545 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4546 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4549 wl_sta = (struct wl1271_station *)sta->drv_priv;
4550 hlid = wl_sta->hlid;
4552 /* Add station (AP mode) */
4554 old_state == IEEE80211_STA_NOTEXIST &&
4555 new_state == IEEE80211_STA_NONE) {
4556 ret = wl12xx_sta_add(wl, wlvif, sta);
4560 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4563 /* Remove station (AP mode) */
4565 old_state == IEEE80211_STA_NONE &&
4566 new_state == IEEE80211_STA_NOTEXIST) {
4568 wl12xx_sta_remove(wl, wlvif, sta);
4570 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4573 /* Authorize station (AP mode) */
4575 new_state == IEEE80211_STA_AUTHORIZED) {
4576 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
4580 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4585 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4588 /* Authorize station */
4590 new_state == IEEE80211_STA_AUTHORIZED) {
4591 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4592 ret = wl12xx_set_authorized(wl, wlvif);
/* De-authorize: drop the authorized/state-sent flags on the vif */
4598 old_state == IEEE80211_STA_AUTHORIZED &&
4599 new_state == IEEE80211_STA_ASSOC) {
4600 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4601 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4604 /* clear ROCs on failure or authorization */
4606 (new_state == IEEE80211_STA_AUTHORIZED ||
4607 new_state == IEEE80211_STA_NOTEXIST)) {
4608 if (test_bit(wlvif->role_id, wl->roc_map))
4609 wl12xx_croc(wl, wlvif->role_id);
4613 old_state == IEEE80211_STA_NOTEXIST &&
4614 new_state == IEEE80211_STA_NONE) {
4615 if (find_first_bit(wl->roc_map,
4616 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4617 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4618 wl12xx_roc(wl, wlvif, wlvif->role_id,
4619 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: thin locking/power wrapper around
 * wl12xx_update_sta_state().  When the device is not ON, downward
 * transitions must still be reported as success so mac80211's state
 * machine can unwind (see the new_state < old_state check at the end).
 */
4625 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4626 struct ieee80211_vif *vif,
4627 struct ieee80211_sta *sta,
4628 enum ieee80211_sta_state old_state,
4629 enum ieee80211_sta_state new_state)
4631 struct wl1271 *wl = hw->priv;
4632 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4635 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4636 sta->aid, old_state, new_state);
4638 mutex_lock(&wl->mutex);
4640 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4645 ret = wl1271_ps_elp_wakeup(wl);
4649 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4651 wl1271_ps_elp_sleep(wl);
4653 mutex_unlock(&wl->mutex);
4654 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * firmware.  The per-link BA bitmap lives on the vif (STA role) or on
 * the link table entry (AP role).  RX_START enforces the vif's
 * ba_support/ba_allowed flags, the RX_BA_MAX_SESSIONS global limit and
 * rejects a second session on an already-active TID; RX_STOP tolerates
 * a missing session (happens on reconfig).  TX A-MPDU setup is done
 * autonomously by the firmware, so all TX actions fall through and are
 * not handled here.
 */
4659 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4660 struct ieee80211_vif *vif,
4661 enum ieee80211_ampdu_mlme_action action,
4662 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4665 struct wl1271 *wl = hw->priv;
4666 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4668 u8 hlid, *ba_bitmap;
4670 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4673 /* sanity check - the fields in FW are only 8bits wide */
4674 if (WARN_ON(tid > 0xFF))
4677 mutex_lock(&wl->mutex);
4679 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4684 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4685 hlid = wlvif->sta.hlid;
4686 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4687 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4688 struct wl1271_station *wl_sta;
4690 wl_sta = (struct wl1271_station *)sta->drv_priv;
4691 hlid = wl_sta->hlid;
4692 ba_bitmap = &wl->links[hlid].ba_bitmap;
4698 ret = wl1271_ps_elp_wakeup(wl);
4702 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4706 case IEEE80211_AMPDU_RX_START:
4707 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4712 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4714 wl1271_error("exceeded max RX BA sessions");
4718 if (*ba_bitmap & BIT(tid)) {
4720 wl1271_error("cannot enable RX BA session on active "
4725 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4728 *ba_bitmap |= BIT(tid);
4729 wl->ba_rx_session_count++;
4733 case IEEE80211_AMPDU_RX_STOP:
4734 if (!(*ba_bitmap & BIT(tid))) {
4736 * this happens on reconfig - so only output a debug
4737 * message for now, and don't fail the function.
4739 wl1271_debug(DEBUG_MAC80211,
4740 "no active RX BA session on tid: %d",
4746 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4749 *ba_bitmap &= ~BIT(tid);
4750 wl->ba_rx_session_count--;
4755 * The BA initiator session management in FW independently.
4756 * Falling break here on purpose for all TX APDU commands.
4758 case IEEE80211_AMPDU_TX_START:
4759 case IEEE80211_AMPDU_TX_STOP:
4760 case IEEE80211_AMPDU_TX_OPERATIONAL:
4765 wl1271_error("Incorrect ampdu action id=%x\n", action);
4769 wl1271_ps_elp_sleep(wl);
4772 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: convert the per-band legacy rate
 * masks into firmware rate sets and store them on the vif.  For an
 * unassociated STA the new basic/default rates are pushed to the
 * firmware immediately (band rate refresh + rate-policy ACX).
 */
4777 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4778 struct ieee80211_vif *vif,
4779 const struct cfg80211_bitrate_mask *mask)
4781 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4782 struct wl1271 *wl = hw->priv;
4785 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4786 mask->control[NL80211_BAND_2GHZ].legacy,
4787 mask->control[NL80211_BAND_5GHZ].legacy);
4789 mutex_lock(&wl->mutex);
4791 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4792 wlvif->bitrate_masks[i] =
4793 wl1271_tx_enabled_rates_get(wl,
4794 mask->control[i].legacy,
4797 if (unlikely(wl->state != WLCORE_STATE_ON))
4800 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4801 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4803 ret = wl1271_ps_elp_wakeup(wl);
4807 wl1271_set_band_rate(wl, wlvif);
4809 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4810 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4812 wl1271_ps_elp_sleep(wl);
4815 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback: hand the CSA to the firmware for
 * every STA vif.  If the device is OFF, immediately report the switch
 * as failed to mac80211 (ieee80211_chswitch_done(..., false)).  On
 * success, arm a delayed work that flags failure if the firmware never
 * confirms the switch (~5s after the expected switch time, computed
 * from the beacon interval in usec).
 */
4820 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4821 struct ieee80211_channel_switch *ch_switch)
4823 struct wl1271 *wl = hw->priv;
4824 struct wl12xx_vif *wlvif;
4827 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4829 wl1271_tx_flush(wl);
4831 mutex_lock(&wl->mutex);
4833 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4834 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4835 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4836 ieee80211_chswitch_done(vif, false);
4839 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4843 ret = wl1271_ps_elp_wakeup(wl);
4847 /* TODO: change mac80211 to pass vif as param */
4848 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4849 unsigned long delay_usec;
4851 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4855 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4857 /* indicate failure 5 seconds after channel switch time */
4858 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4860 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4861 usecs_to_jiffies(delay_usec) +
4862 msecs_to_jiffies(5000));
4866 wl1271_ps_elp_sleep(wl);
4869 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: push out all queued TX frames.  The 'drop'
 * hint is ignored; frames are always flushed, never discarded.
 */
4872 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4874 struct wl1271 *wl = hw->priv;
4876 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work after 'duration'.
 * Only one ROC may be active at a time: a pending roc_vif or any bit in
 * roc_map makes the request fail (WARN + error return).
 */
4879 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4880 struct ieee80211_vif *vif,
4881 struct ieee80211_channel *chan,
4884 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4885 struct wl1271 *wl = hw->priv;
4886 int channel, ret = 0;
4888 channel = ieee80211_frequency_to_channel(chan->center_freq);
4890 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4891 channel, wlvif->role_id);
4893 mutex_lock(&wl->mutex);
4895 if (unlikely(wl->state != WLCORE_STATE_ON))
4898 /* return EBUSY if we can't ROC right now */
4899 if (WARN_ON(wl->roc_vif ||
4900 find_first_bit(wl->roc_map,
4901 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4906 ret = wl1271_ps_elp_wakeup(wl);
4910 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4915 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4916 msecs_to_jiffies(duration));
4918 wl1271_ps_elp_sleep(wl);
4920 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel (caller holds wl->mutex): stop the device
 * role belonging to roc_vif.  Returns early if the ROC was already
 * completed (roc_vif cleared) or the vif is no longer initialized.
 */
4924 static int __wlcore_roc_completed(struct wl1271 *wl)
4926 struct wl12xx_vif *wlvif;
4929 /* already completed */
4930 if (unlikely(!wl->roc_vif))
4933 wlvif = wl12xx_vif_to_data(wl->roc_vif);
4935 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4938 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked/powered wrapper around __wlcore_roc_completed(): take
 * wl->mutex, wake the chip from ELP, complete the ROC, sleep again.
 */
4947 static int wlcore_roc_completed(struct wl1271 *wl)
4951 wl1271_debug(DEBUG_MAC80211, "roc complete");
4953 mutex_lock(&wl->mutex);
4955 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4960 ret = wl1271_ps_elp_wakeup(wl);
4964 ret = __wlcore_roc_completed(wl);
4966 wl1271_ps_elp_sleep(wl);
4968 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when the ROC duration expires: complete the ROC
 * in the firmware and notify mac80211 that the ROC period is over.
 */
4973 static void wlcore_roc_complete_work(struct work_struct *work)
4975 struct delayed_work *dwork;
4979 dwork = container_of(work, struct delayed_work, work);
4980 wl = container_of(dwork, struct wl1271, roc_complete_work);
4982 ret = wlcore_roc_completed(wl);
4984 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work (sync; see the deadlock note below) and
 * complete the ROC immediately.
 */
4987 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
4989 struct wl1271 *wl = hw->priv;
4991 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
4994 wl1271_tx_flush(wl);
4997 * we can't just flush_work here, because it might deadlock
4998 * (as we might get called from the same workqueue)
5000 cancel_delayed_work_sync(&wl->roc_complete_work);
5001 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback: forward rate-control changes for a
 * station straight to the chip-specific handler.
 */
5006 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5007 struct ieee80211_vif *vif,
5008 struct ieee80211_sta *sta,
5011 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5012 struct wl1271 *wl = hw->priv;
5014 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 tx_frames_pending callback: true when any frame is still
 * waiting in the driver's TX queues or held by the firmware.
 */
5017 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5019 struct wl1271 *wl = hw->priv;
5022 mutex_lock(&wl->mutex);
5024 if (unlikely(wl->state != WLCORE_STATE_ON))
5027 /* packets are considered pending if in the TX queue or the FW */
5028 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5030 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz legacy rate table (CCK 1/2/5.5/11 with short-preamble flags,
 * then OFDM 6..54).  hw_value/hw_value_short map each rate to the
 * firmware's CONF_HW_BIT_RATE_* bit.
 */
5035 /* can't be const, mac80211 writes to this */
5036 static struct ieee80211_rate wl1271_rates[] = {
5038 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5039 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5041 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5042 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5043 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5045 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5046 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5047 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5049 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5050 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5051 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5053 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5054 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5056 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5057 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5059 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5060 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5062 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5063 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5065 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5066 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5068 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5069 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5071 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5072 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5074 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5075 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channel list (channels 1-14) with maximum TX power. */
5078 /* can't be const, mac80211 writes to this */
5079 static struct ieee80211_channel wl1271_channels[] = {
5080 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5081 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5082 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5083 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5084 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5085 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5086 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5087 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5088 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5089 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5090 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5091 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5092 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5093 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz supported-band descriptor tying channels and rates together. */
5096 /* can't be const, mac80211 writes to this */
5097 static struct ieee80211_supported_band wl1271_band_2ghz = {
5098 .channels = wl1271_channels,
5099 .n_channels = ARRAY_SIZE(wl1271_channels),
5100 .bitrates = wl1271_rates,
5101 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/* 5 GHz OFDM rate table (6..54 Mbps) for WL1273; no CCK on 5 GHz. */
5104 /* 5 GHz data rates for WL1273 */
5105 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5107 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5108 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5110 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5111 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5113 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5114 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5116 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5117 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5119 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5120 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5122 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5123 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5125 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5126 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5128 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5129 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel list for WL1273 with maximum TX power per channel. */
5132 /* 5 GHz band channels for WL1273 */
5133 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5134 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5135 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5136 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5137 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5138 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5139 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5140 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5141 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5142 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5143 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5144 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5145 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5146 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5147 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5148 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5149 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5150 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5151 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5152 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5153 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5154 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5155 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5156 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5157 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5158 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5159 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5160 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5161 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5162 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5163 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5164 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5165 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5166 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5167 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor tying channels and rates together. */
5170 static struct ieee80211_supported_band wl1271_band_5ghz = {
5171 .channels = wl1271_channels_5ghz,
5172 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5173 .bitrates = wl1271_rates_5ghz,
5174 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 operations table for the wlcore family: maps every callback
 * the stack may invoke to its driver implementation.  The testmode
 * command is only compiled in when CFG80211_TESTMODE is enabled.
 */
5177 static const struct ieee80211_ops wl1271_ops = {
5178 .start = wl1271_op_start,
5179 .stop = wlcore_op_stop,
5180 .add_interface = wl1271_op_add_interface,
5181 .remove_interface = wl1271_op_remove_interface,
5182 .change_interface = wl12xx_op_change_interface,
5184 .suspend = wl1271_op_suspend,
5185 .resume = wl1271_op_resume,
5187 .config = wl1271_op_config,
5188 .prepare_multicast = wl1271_op_prepare_multicast,
5189 .configure_filter = wl1271_op_configure_filter,
5191 .set_key = wlcore_op_set_key,
5192 .hw_scan = wl1271_op_hw_scan,
5193 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5194 .sched_scan_start = wl1271_op_sched_scan_start,
5195 .sched_scan_stop = wl1271_op_sched_scan_stop,
5196 .bss_info_changed = wl1271_op_bss_info_changed,
5197 .set_frag_threshold = wl1271_op_set_frag_threshold,
5198 .set_rts_threshold = wl1271_op_set_rts_threshold,
5199 .conf_tx = wl1271_op_conf_tx,
5200 .get_tsf = wl1271_op_get_tsf,
5201 .get_survey = wl1271_op_get_survey,
5202 .sta_state = wl12xx_op_sta_state,
5203 .ampdu_action = wl1271_op_ampdu_action,
5204 .tx_frames_pending = wl1271_tx_frames_pending,
5205 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5206 .channel_switch = wl12xx_op_channel_switch,
5207 .flush = wlcore_op_flush,
5208 .remain_on_channel = wlcore_op_remain_on_channel,
5209 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5210 .add_chanctx = wlcore_op_add_chanctx,
5211 .remove_chanctx = wlcore_op_remove_chanctx,
5212 .change_chanctx = wlcore_op_change_chanctx,
5213 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5214 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5215 .sta_rc_update = wlcore_op_sta_rc_update,
5216 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a hardware RX rate value into a mac80211 rate index using
 * the per-band lookup table; logs and rejects out-of-range or
 * unsupported rate values reported by the firmware.
 */
5220 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5226 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5227 wl1271_error("Illegal RX rate from HW: %d", rate);
5231 idx = wl->band_rate_to_idx[band][rate];
5232 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5233 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs 'bt_coex_state' show: print the current BT soft-gemini enable
 * state together with a small legend, under wl->mutex.
 */
5240 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5241 struct device_attribute *attr,
5244 struct wl1271 *wl = dev_get_drvdata(dev);
5249 mutex_lock(&wl->mutex);
5250 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5252 mutex_unlock(&wl->mutex);
/*
 * sysfs 'bt_coex_state' store: parse a 0/1 value and, if it differs
 * from the current soft-gemini setting, update wl->sg_enabled and push
 * the new state to the firmware (only when the device is ON).
 */
5258 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5259 struct device_attribute *attr,
5260 const char *buf, size_t count)
5262 struct wl1271 *wl = dev_get_drvdata(dev);
5266 ret = kstrtoul(buf, 10, &res);
5268 wl1271_warning("incorrect value written to bt_coex_mode");
5272 mutex_lock(&wl->mutex);
5276 if (res == wl->sg_enabled)
5279 wl->sg_enabled = res;
5281 if (unlikely(wl->state != WLCORE_STATE_ON))
5284 ret = wl1271_ps_elp_wakeup(wl);
5288 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5289 wl1271_ps_elp_sleep(wl);
5292 mutex_unlock(&wl->mutex);
/* sysfs attribute: user-writable BT coexistence (soft-gemini) switch. */
5296 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5297 wl1271_sysfs_show_bt_coex_state,
5298 wl1271_sysfs_store_bt_coex_state);
/*
 * sysfs 'hw_pg_ver' show: print the hardware PG (production/grade)
 * version if it has been read from the chip, "n/a" otherwise.
 */
5300 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5301 struct device_attribute *attr,
5304 struct wl1271 *wl = dev_get_drvdata(dev);
5309 mutex_lock(&wl->mutex);
5310 if (wl->hw_pg_ver >= 0)
5311 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5313 len = snprintf(buf, len, "n/a\n");
5314 mutex_unlock(&wl->mutex);
/* sysfs attribute: read-only hardware PG version. */
5319 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5320 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary 'fwlog' read: block (interruptibly, one exclusive reader
 * at a time) until firmware log data is available, then copy up to
 * 'count' bytes out and compact the remaining log to the front of the
 * buffer.  Seeking is not supported, old data is discarded once read.
 * Returns -ERESTARTSYS if interrupted while waiting or locking.
 */
5322 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5323 struct bin_attribute *bin_attr,
5324 char *buffer, loff_t pos, size_t count)
5326 struct device *dev = container_of(kobj, struct device, kobj);
5327 struct wl1271 *wl = dev_get_drvdata(dev);
5331 ret = mutex_lock_interruptible(&wl->mutex);
5333 return -ERESTARTSYS;
5335 /* Let only one thread read the log at a time, blocking others */
5336 while (wl->fwlog_size == 0) {
5339 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5341 TASK_INTERRUPTIBLE);
5343 if (wl->fwlog_size != 0) {
5344 finish_wait(&wl->fwlog_waitq, &wait);
5348 mutex_unlock(&wl->mutex);
5351 finish_wait(&wl->fwlog_waitq, &wait);
5353 if (signal_pending(current))
5354 return -ERESTARTSYS;
5356 ret = mutex_lock_interruptible(&wl->mutex);
5358 return -ERESTARTSYS;
5361 /* Check if the fwlog is still valid */
5362 if (wl->fwlog_size < 0) {
5363 mutex_unlock(&wl->mutex);
5367 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5368 len = min(count, (size_t)wl->fwlog_size);
5369 wl->fwlog_size -= len;
5370 memcpy(buffer, wl->fwlog, len);
5372 /* Make room for new messages */
5373 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5375 mutex_unlock(&wl->mutex);
/* sysfs binary attribute exposing the firmware log (owner read-only). */
5380 static struct bin_attribute fwlog_attr = {
5381 .attr = {.name = "fwlog", .mode = S_IRUSR},
5382 .read = wl1271_sysfs_read_fwlog,
/*
 * Build the wiphy's MAC address list from a 24-bit OUI and a 24-bit NIC
 * base (fuse or NVS derived).  Consecutive addresses share the OUI and
 * increment the NIC part; a warning is logged if that would wrap past
 * 0xffffff.  If the chip provides fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is filled with a copy of the
 * first one with the locally-administered (LAA) bit set.
 */
5385 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5389 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5392 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5393 wl1271_warning("NIC part of the MAC address wraps around!");
5395 for (i = 0; i < wl->num_mac_addr; i++) {
5396 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5397 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5398 wl->addresses[i].addr[2] = (u8) oui;
5399 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5400 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5401 wl->addresses[i].addr[5] = (u8) nic;
5405 /* we may be one address short at the most */
5406 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5409 * turn on the LAA bit in the first address and use it as
5412 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5413 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5414 memcpy(&wl->addresses[idx], &wl->addresses[0],
5415 sizeof(wl->addresses[0]));
/* NOTE(review): BIT(1) in addr[2] is the usual LAA bit position of
 * addr[0]; applied to addr[2] here — confirm against full source. */
5417 wl->addresses[idx].addr[2] |= BIT(1);
5420 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5421 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on briefly to read identification data: chip id,
 * PG version and (if the chip-specific op exists) the fused MAC
 * address.  The chip is powered off again before returning.
 */
5424 static int wl12xx_get_hw_info(struct wl1271 *wl)
5428 ret = wl12xx_set_power_on(wl);
5432 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5436 wl->fuse_oui_addr = 0;
5437 wl->fuse_nic_addr = 0;
5439 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5443 if (wl->ops->get_mac)
5444 ret = wl->ops->get_mac(wl);
5447 wl1271_power_off(wl);
/*
 * Register the device with mac80211 (idempotent: bails out if already
 * registered).  The MAC base address is taken from the NVS when present
 * (note the non-contiguous byte layout in the nvs buffer), falling back
 * to the fuse-derived OUI/NIC when the NVS address is all zeros; the
 * NIC is offset by 1 because the fuse value is the BD_ADDR.
 */
5451 static int wl1271_register_hw(struct wl1271 *wl)
5454 u32 oui_addr = 0, nic_addr = 0;
5456 if (wl->mac80211_registered)
5459 if (wl->nvs_len >= 12) {
5460 /* NOTE: The wl->nvs->nvs element must be first, in
5461 * order to simplify the casting, we assume it is at
5462 * the beginning of the wl->nvs structure.
5464 u8 *nvs_ptr = (u8 *)wl->nvs;
5467 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5469 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5472 /* if the MAC address is zeroed in the NVS derive from fuse */
5473 if (oui_addr == 0 && nic_addr == 0) {
5474 oui_addr = wl->fuse_oui_addr;
5475 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5476 nic_addr = wl->fuse_nic_addr + 1;
5479 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5481 ret = ieee80211_register_hw(wl->hw);
5483 wl1271_error("unable to register mac80211 hw: %d", ret);
5487 wl->mac80211_registered = true;
5489 wl1271_debugfs_init(wl);
5491 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop PLT mode if active and unregister
 * from mac80211.
 */
5497 static void wl1271_unregister_hw(struct wl1271 *wl)
5500 wl1271_plt_stop(wl);
5502 ieee80211_unregister_hw(wl->hw);
5503 wl->mac80211_registered = false;
/*
 * Interface-combination limits: STATION interfaces in one group,
 * AP/P2P-GO/P2P-client in another (counts live on the elided lines).
 */
5507 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5510 .types = BIT(NL80211_IFTYPE_STATION),
5514 .types = BIT(NL80211_IFTYPE_AP) |
5515 BIT(NL80211_IFTYPE_P2P_GO) |
5516 BIT(NL80211_IFTYPE_P2P_CLIENT),
/*
 * Allowed interface combinations: up to 3 concurrent interfaces within
 * the limits above.  num_different_channels is filled in at init time
 * from the chip's capabilities (see wl1271_init_ieee80211), which is
 * why this table cannot be const.
 */
5520 static struct ieee80211_iface_combination
5521 wlcore_iface_combinations[] = {
5523 .max_interfaces = 3,
5524 .limits = wlcore_iface_limits,
5525 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * One-time mac80211/wiphy setup before registration: cipher suites
 * (including TI's proprietary GEM), TX headroom (plus TKIP extra space
 * on chips with that quirk), hardware capability flags, supported
 * interface modes, scan/sched-scan limits, ROC duration, band tables
 * (local copies so per-device HT caps can be patched in), probe-
 * response offload and allowed interface combinations.
 */
5529 static int wl1271_init_ieee80211(struct wl1271 *wl)
5532 static const u32 cipher_suites[] = {
5533 WLAN_CIPHER_SUITE_WEP40,
5534 WLAN_CIPHER_SUITE_WEP104,
5535 WLAN_CIPHER_SUITE_TKIP,
5536 WLAN_CIPHER_SUITE_CCMP,
5537 WL1271_CIPHER_SUITE_GEM,
5540 /* The tx descriptor buffer */
5541 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5543 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5544 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5547 /* FIXME: find a proper value */
5548 wl->hw->channel_change_time = 10000;
5549 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5551 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5552 IEEE80211_HW_SUPPORTS_PS |
5553 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5554 IEEE80211_HW_SUPPORTS_UAPSD |
5555 IEEE80211_HW_HAS_RATE_CONTROL |
5556 IEEE80211_HW_CONNECTION_MONITOR |
5557 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5558 IEEE80211_HW_SPECTRUM_MGMT |
5559 IEEE80211_HW_AP_LINK_PS |
5560 IEEE80211_HW_AMPDU_AGGREGATION |
5561 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5562 IEEE80211_HW_SCAN_WHILE_IDLE;
5564 wl->hw->wiphy->cipher_suites = cipher_suites;
5565 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5567 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5568 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5569 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5570 wl->hw->wiphy->max_scan_ssids = 1;
5571 wl->hw->wiphy->max_sched_scan_ssids = 16;
5572 wl->hw->wiphy->max_match_sets = 16;
5574 * Maximum length of elements in scanning probe request templates
5575 * should be the maximum length possible for a template, without
5576 * the IEEE80211 header of the template
5578 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5579 sizeof(struct ieee80211_header);
5581 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5582 sizeof(struct ieee80211_header);
5584 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5586 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5587 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5589 /* make sure all our channels fit in the scanned_ch bitmask */
5590 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5591 ARRAY_SIZE(wl1271_channels_5ghz) >
5592 WL1271_MAX_CHANNELS);
5594 * clear channel flags from the previous usage
5595 * and restore max_power & max_antenna_gain values.
5597 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5598 wl1271_band_2ghz.channels[i].flags = 0;
5599 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5600 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5603 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5604 wl1271_band_5ghz.channels[i].flags = 0;
5605 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5606 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5610 * We keep local copies of the band structs because we need to
5611 * modify them on a per-device basis.
5613 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5614 sizeof(wl1271_band_2ghz));
5615 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5616 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5617 sizeof(*wl->ht_cap));
5618 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5619 sizeof(wl1271_band_5ghz));
5620 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5621 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5622 sizeof(*wl->ht_cap));
5624 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5625 &wl->bands[IEEE80211_BAND_2GHZ];
5626 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5627 &wl->bands[IEEE80211_BAND_5GHZ];
5630 wl->hw->max_rates = 1;
5632 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5634 /* the FW answers probe-requests in AP-mode */
5635 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5636 wl->hw->wiphy->probe_resp_offload =
5637 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5638 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5639 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5641 /* allowed interface combinations */
5642 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5643 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5644 wl->hw->wiphy->n_iface_combinations =
5645 ARRAY_SIZE(wlcore_iface_combinations);
5647 SET_IEEE80211_DEV(wl->hw, wl->dev);
5649 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5650 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5652 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5657 #define WL1271_DEFAULT_CHANNEL 0
/*
 * wlcore_alloc_hw - allocate and pre-initialize the mac80211 hw and wl1271.
 *
 * Allocates the ieee80211_hw with a struct wl1271 as its private area, then
 * a chip-specific priv blob of @priv_size, the TX aggregation buffer of
 * @aggr_buf_size (whole pages via __get_free_pages), the dummy packet, one
 * zeroed page for the FW log, and the event mailbox of @mbox_size (GFP_DMA
 * because the mailbox is a bus-transfer target).  Also initializes all per-
 * link TX queues, deferred queues, work items, locks and default state.
 *
 * Returns the ieee80211_hw on success, ERR_PTR(ret) on failure; the error
 * path unwinds every allocation in reverse order.  Freed by wlcore_free_hw().
 *
 * NOTE(review): this listing is elided — several `if (...)`, `ret = -ENOMEM;`
 * and closing-brace lines are missing between the visible statements, and
 * each line carries a stray original line-number prefix.
 */
5659 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5662 	struct ieee80211_hw *hw;
/* every station entry must fit in the link map */
5667 	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5669 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5671 		wl1271_error("could not alloc ieee80211_hw");
5677 	memset(wl, 0, sizeof(*wl));
/* chip-family (wl12xx/wl18xx) private data, sized by the caller */
5679 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5681 		wl1271_error("could not alloc wl priv");
5683 		goto err_priv_alloc;
5686 	INIT_LIST_HEAD(&wl->wlvif_list);
/* one TX queue per AC for every possible link */
5690 	for (i = 0; i < NUM_TX_QUEUES; i++)
5691 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
5692 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5694 	skb_queue_head_init(&wl->deferred_rx_queue);
5695 	skb_queue_head_init(&wl->deferred_tx_queue);
5697 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5698 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5699 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5700 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5701 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5702 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5703 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so pending work is quiesced across suspend/resume */
5705 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5706 	if (!wl->freezable_wq) {
/* default runtime state before the chip is booted */
5711 	wl->channel = WL1271_DEFAULT_CHANNEL;
5713 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5714 	wl->band = IEEE80211_BAND_2GHZ;
5715 	wl->channel_type = NL80211_CHAN_NO_HT;
5717 	wl->sg_enabled = true;
5718 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5719 	wl->recovery_count = 0;
5722 	wl->ap_fw_ps_map = 0;
5724 	wl->platform_quirks = 0;
5725 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5726 	wl->active_sta_count = 0;
/* readers of the FW log sleep here until data (or teardown) arrives */
5728 	init_waitqueue_head(&wl->fwlog_waitq);
5730 	/* The system link is always allocated */
5731 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5733 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5734 	for (i = 0; i < wl->num_tx_desc; i++)
5735 		wl->tx_frames[i] = NULL;
5737 	spin_lock_init(&wl->wl_lock);
5739 	wl->state = WLCORE_STATE_OFF;
5740 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5741 	mutex_init(&wl->mutex);
5742 	mutex_init(&wl->flush_mutex);
/* completed by wlcore_nvs_cb(); wlcore_remove() waits on it */
5743 	init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be whole pages for the bus DMA path */
5745 	order = get_order(aggr_buf_size);
5746 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5747 	if (!wl->aggr_buf) {
5751 	wl->aggr_buf_size = aggr_buf_size;
5753 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5754 	if (!wl->dummy_packet) {
5759 	/* Allocate one page for the FW log */
5760 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5763 		goto err_dummy_packet;
5766 	wl->mbox_size = mbox_size;
/* GFP_DMA: the event mailbox is read into directly by the bus driver */
5767 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
/* error unwind: reverse order of the allocations above */
5776 	free_page((unsigned long)wl->fwlog);
5779 	dev_kfree_skb(wl->dummy_packet);
5782 	free_pages((unsigned long)wl->aggr_buf, order);
5785 	destroy_workqueue(wl->freezable_wq);
5788 	wl1271_debugfs_exit(wl);
5792 	ieee80211_free_hw(hw);
5796 	return ERR_PTR(ret);
5798 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * wlcore_free_hw - tear down everything wlcore_alloc_hw() set up.
 *
 * Wakes any sleeping FW-log readers (fwlog_size = -1 is the "going away"
 * sentinel), removes the sysfs files created in wlcore_nvs_cb(), then frees
 * the FW log page, dummy packet, aggregation buffer, debugfs entries, FW
 * status / TX result buffers and the workqueue before releasing the
 * ieee80211_hw itself.  Always returns 0.
 *
 * NOTE(review): listing is elided — braces, `kfree(wl->mbox)`/`kfree(wl->nvs)`
 * style lines and the final `return 0;` are not visible here, and each line
 * carries a stray original line-number prefix.
 */
5800 int wlcore_free_hw(struct wl1271 *wl)
5802 	/* Unblock any fwlog readers */
5803 	mutex_lock(&wl->mutex);
5804 	wl->fwlog_size = -1;
5805 	wake_up_interruptible_all(&wl->fwlog_waitq);
5806 	mutex_unlock(&wl->mutex);
/* undo the sysfs files created during wlcore_nvs_cb() */
5808 	device_remove_bin_file(wl->dev, &fwlog_attr);
5810 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5812 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
/* release buffers in the reverse order of allocation */
5814 	free_page((unsigned long)wl->fwlog);
5815 	dev_kfree_skb(wl->dummy_packet);
5816 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5818 	wl1271_debugfs_exit(wl);
5822 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5826 	kfree(wl->fw_status_1);
5827 	kfree(wl->tx_res_if);
5828 	destroy_workqueue(wl->freezable_wq);
5831 	ieee80211_free_hw(wl->hw);
5835 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5837 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5839 struct wl1271 *wl = cookie;
5840 unsigned long flags;
5842 wl1271_debug(DEBUG_IRQ, "IRQ");
5844 /* complete the ELP completion */
5845 spin_lock_irqsave(&wl->wl_lock, flags);
5846 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5847 if (wl->elp_compl) {
5848 complete(wl->elp_compl);
5849 wl->elp_compl = NULL;
5852 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5853 /* don't enqueue a work right now. mark it as pending */
5854 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5855 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5856 disable_irq_nosync(wl->irq);
5857 pm_wakeup_event(wl->dev, 0);
5858 spin_unlock_irqrestore(&wl->wl_lock, flags);
5861 spin_unlock_irqrestore(&wl->wl_lock, flags);
5863 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - deferred driver initialization, run by the firmware
 * loader once the (optional) NVS calibration file lookup finishes.
 *
 * Copies the NVS blob (if found), runs the chip-family setup hook, wires up
 * platform data (IRQ, quirks, power callback, bus ops), requests the
 * threaded IRQ, configures wakeup/WoWLAN capabilities, identifies the chip,
 * initializes and registers the ieee80211 hw, and creates the sysfs files.
 * On success sets wl->initialized; always releases the firmware and
 * completes nvs_loading_complete so wlcore_remove() can proceed.
 *
 * NOTE(review): listing is elided — the `if (fw) { ... } else { ... }`
 * structure around the NVS copy, the `if (ret < 0) goto ...` guards after
 * each init step and the error labels (out_unreg/out_irq/...) are missing
 * between the visible statements, and each line carries a stray original
 * line-number prefix.
 */
5866 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5868 	struct wl1271 *wl = context;
5869 	struct platform_device *pdev = wl->pdev;
5870 	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5871 	unsigned long irqflags;
/* keep a private copy of the NVS; fw is released before we return */
5875 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5877 			wl1271_error("Could not allocate nvs data");
5880 		wl->nvs_len = fw->size;
/* a missing NVS file is not fatal — boot continues without it */
5882 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
/* chip-family specific setup (wl12xx/wl18xx ops) */
5888 	ret = wl->ops->setup(wl);
5892 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5894 	/* adjust some runtime configuration parameters */
5895 	wlcore_adjust_conf(wl);
5897 	wl->irq = platform_get_irq(pdev, 0);
5898 	wl->platform_quirks = pdata->platform_quirks;
5899 	wl->set_power = pdata->set_power;
5900 	wl->if_ops = pdata->ops;
/* edge-IRQ platforms cannot use ONESHOT level triggering */
5902 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5903 		irqflags = IRQF_TRIGGER_RISING;
5905 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
/* hard-half wl12xx_hardirq + threaded wlcore_irq */
5907 	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5911 		wl1271_error("request_irq() failed: %d", ret);
/* allow the chip IRQ to wake the system from suspend */
5916 	ret = enable_irq_wake(wl->irq);
5918 		wl->irq_wake_enabled = true;
5919 		device_init_wakeup(wl->dev, 1);
5920 		if (pdata->pwr_in_suspend) {
5921 			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5922 			wl->hw->wiphy->wowlan.n_patterns =
5923 				WL1271_MAX_RX_FILTERS;
5924 			wl->hw->wiphy->wowlan.pattern_min_len = 1;
5925 			wl->hw->wiphy->wowlan.pattern_max_len =
5926 				WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* keep the line quiet until the interface is brought up */
5930 	disable_irq(wl->irq);
5932 	ret = wl12xx_get_hw_info(wl);
5934 		wl1271_error("couldn't get hw info");
5938 	ret = wl->ops->identify_chip(wl);
5942 	ret = wl1271_init_ieee80211(wl);
5946 	ret = wl1271_register_hw(wl);
5950 	/* Create sysfs file to control bt coex state */
5951 	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5953 		wl1271_error("failed to create sysfs file bt_coex_state");
5957 	/* Create sysfs file to get HW PG version */
5958 	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5960 		wl1271_error("failed to create sysfs file hw_pg_ver");
5961 		goto out_bt_coex_state;
5964 	/* Create sysfs file for the FW log */
5965 	ret = device_create_bin_file(wl->dev, &fwlog_attr);
5967 		wl1271_error("failed to create sysfs file fwlog");
/* checked by wlcore_remove() to decide whether teardown is needed */
5971 	wl->initialized = true;
/* error unwind: remove sysfs files / unregister / free the IRQ */
5975 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5978 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5981 	wl1271_unregister_hw(wl);
5984 	free_irq(wl->irq, wl);
/* always release the firmware and unblock wlcore_remove() */
5990 	release_firmware(fw);
5991 	complete_all(&wl->nvs_loading_complete);
5994 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5998 if (!wl->ops || !wl->ptable)
6001 wl->dev = &pdev->dev;
6003 platform_set_drvdata(pdev, wl);
6005 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6006 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6009 wl1271_error("request_firmware_nowait failed: %d", ret);
6010 complete_all(&wl->nvs_loading_complete);
6015 EXPORT_SYMBOL_GPL(wlcore_probe);
6017 int __devexit wlcore_remove(struct platform_device *pdev)
6019 struct wl1271 *wl = platform_get_drvdata(pdev);
6021 wait_for_completion(&wl->nvs_loading_complete);
6022 if (!wl->initialized)
6025 if (wl->irq_wake_enabled) {
6026 device_init_wakeup(wl->dev, 0);
6027 disable_irq_wake(wl->irq);
6029 wl1271_unregister_hw(wl);
6030 free_irq(wl->irq, wl);
6035 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Runtime-writable debug bitmask; exported so the chip-family modules
 * (wl12xx/wl18xx) share the same logging level. */
6037 u32 wl12xx_debug_level = DEBUG_NONE;
6038 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6039 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6040 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; load-time only (perm 0 — not visible in sysfs) */
6042 module_param_named(fwlog, fwlog_param, charp, 0);
6043 MODULE_PARM_DESC(fwlog,
6044 		 "FW logger options: continuous, ondemand, dbgpins or disable");
/* Recovery behavior knobs (declared at the top of the file, default -1) */
6046 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6047 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6049 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6050 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6052 MODULE_LICENSE("GPL");
6053 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6054 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
/* advertise the NVS file so userspace tooling ships it with the module */
6055 MODULE_FIRMWARE(WL12XX_NVS_NAME);