3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/* Number of attempts to boot the firmware before giving up. */
#define WL1271_BOOT_RETRIES 3

/*
 * Module parameters (values assigned via module_param elsewhere):
 *  - fwlog_param: FW logger mode string; parsed in wlcore_adjust_conf()
 *    ("continuous"/"ondemand"/"dbgpins"/"disable").
 *  - bug_on_recovery: when set, BUG() on unintended FW recovery
 *    (see wl1271_recovery_work()).
 *  - no_recovery: when set, skip the chip restart and leave the FW stuck
 *    (see wl1271_recovery_work()).
 */
static char *fwlog_param;
static bool bug_on_recovery;
static bool no_recovery;
/* Forward declarations for static helpers defined later in this file. */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that this STA interface is fully authorized.
 * Valid only for STA-type BSS (WARN otherwise); does nothing unless the
 * vif is associated, and the STA_STATE_SENT flag ensures the peer-state
 * command is issued at most once per association.
 * NOTE(review): this extract is missing several body lines (braces,
 * returns); comments cover only the visible statements.
 */
static int wl12xx_set_authorized(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* only send the peer-state command once per association */
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
wl1271_info("Association completed.");
/*
 * Regulatory-domain change callback: walk the 5 GHz band and force
 * radar-detection channels to passive scanning with IBSS disallowed,
 * since the driver cannot initiate transmissions on them.
 */
static int wl1271_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
struct ieee80211_supported_band *band;
struct ieee80211_channel *ch;
band = wiphy->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++) {
ch = &band->channels[i];
/* skip channels already disabled by the regulatory core */
if (ch->flags & IEEE80211_CHAN_DISABLED)
if (ch->flags & IEEE80211_CHAN_RADAR)
ch->flags |= IEEE80211_CHAN_NO_IBSS |
IEEE80211_CHAN_PASSIVE_SCAN;
/*
 * Enable/disable periodic RX-streaming in the FW via ACX and mirror the
 * result in the vif's RX_STREAMING_STARTED flag.
 */
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* we should hold wl->mutex */
ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
 * this function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/* reconfigure/disable according to new streaming_period */
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
ret = wl1271_set_rx_streaming(wl, wlvif, true);
ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable RX streaming under wl->mutex when the vif is
 * associated and streaming is wanted (always-on or Soft Gemini active),
 * then arm the inactivity timer that will disable it again.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rx_streaming_enable_work);
struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
/* bail out if already started, not associated, or not wanted */
if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
if (!wl->conf.rx_streaming.interval)
ret = wl1271_ps_elp_wakeup(wl);
ret = wl1271_set_rx_streaming(wl, wlvif, true);
/* stop it after some time of inactivity */
mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
wl1271_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
/*
 * Work item: disable RX streaming (fired from the inactivity timer).
 * Wakes the chip from ELP, sends the disable command, sleeps again.
 */
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rx_streaming_disable_work);
struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
/* nothing to do if streaming was never started */
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
ret = wl1271_ps_elp_wakeup(wl);
ret = wl1271_set_rx_streaming(wl, wlvif, false);
wl1271_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
/*
 * Timer callback: runs in softirq context, so defer the actual disable
 * (which needs wl->mutex) to the disable work item.
 */
static void wl1271_rx_streaming_timer(unsigned long data)
struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
/* wl->mutex must be taken */
/*
 * Re-arm the Tx watchdog: restart the delayed work with a fresh
 * timeout, but only while Tx blocks are actually allocated in the FW.
 */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
/* if the watchdog is not armed, don't do anything */
if (wl->tx_allocated_blocks == 0)
cancel_delayed_work(&wl->tx_watchdog_work);
ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires when no FW Tx completion arrived within the
 * configured timeout. Benign cases (ROC in progress, scan in progress,
 * AP with sleeping stations) just re-arm the watchdog; otherwise Tx is
 * considered stuck and FW recovery is triggered.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
struct delayed_work *dwork;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, tx_watchdog_work);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
/* Tx went out in the meantime - everything is ok */
if (unlikely(wl->tx_allocated_blocks == 0))
/*
 * if a ROC is in progress, we might not have any Tx for a long
 * time (e.g. pending Tx on the non-ROC channels)
 */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * if a scan is in progress, we might not have any Tx for a long
 */
if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * AP might cache a frame for a long time for a sleeping station,
 * so rearm the timer if there's an AP interface with stations. If
 * Tx is genuinely stuck we will most hopefully discover it when all
 * stations are removed due to inactivity.
 */
if (wl->active_sta_count) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
wl->conf.tx.tx_watchdog_timeout,
wl->active_sta_count);
wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found - assume the FW is stuck */
wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_queue_recovery_work(wl);
mutex_unlock(&wl->mutex);
/*
 * Apply the fwlog_param module parameter to the runtime FW-logger
 * configuration. Recognized values: "continuous", "ondemand",
 * "dbgpins" (continuous over debug pins), "disable"; anything else
 * is reported as an error.
 */
static void wlcore_adjust_conf(struct wl1271 *wl)
/* Adjust settings according to optional module parameters */
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
} else if (!strcmp(fwlog_param, "ondemand")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
} else if (!strcmp(fwlog_param, "dbgpins")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
} else if (!strcmp(fwlog_param, "disable")) {
/* "disable": no memory blocks and no output */
wl->conf.fwlog.mem_blocks = 0;
wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/*
 * Regulate host-side (high-level) PS for one AP link based on the FW's
 * per-link PS bitmap and the number of packets pending for that link.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
bool fw_ps, single_sta;
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
single_sta = (wl->active_sta_count == 1);
/*
 * Wake up from high level PS if the STA is asleep with too little
 * packets in FW or if the STA is awake.
 */
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_end(wl, wlvif, hlid);
/*
 * Start high-level PS if the STA is asleep with enough blocks in FW.
 * Make an exception if this is the only connected station. In this
 * case FW-memory congestion is not a problem.
 */
else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Per-AP-vif IRQ bookkeeping: refresh the FW PS bitmap, then for every
 * station link reconcile the allocated-packet count against the FW's
 * freed-packet counters and regulate that link's PS state.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct wl_fw_status_2 *status)
struct wl1271_link *lnk;
/* TODO: also use link_fast_bitmap here */
cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%x cur 0x%x changed 0x%x",
wl->ap_fw_ps_map, cur_fw_ps_map,
wl->ap_fw_ps_map ^ cur_fw_ps_map);
wl->ap_fw_ps_map = cur_fw_ps_map;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
lnk = &wl->links[hlid];
/* delta of packets the FW freed since the last snapshot */
cnt = status->counters.tx_lnk_free_pkts[hlid] -
lnk->prev_freed_pkts;
lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
lnk->allocated_pkts -= cnt;
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
lnk->allocated_pkts);
/*
 * Read and process the FW status area: pull the raw status over the
 * bus, reconcile per-queue and total Tx block/packet counters (with
 * wrap-around protection), manage the Tx watchdog, update per-AP link
 * status, and refresh the host/chip time offset.
 */
static int wlcore_fw_status(struct wl1271 *wl,
struct wl_fw_status_1 *status_1,
struct wl_fw_status_2 *status_2)
struct wl12xx_vif *wlvif;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
/* total status size: part 1 (rx-desc dependent) + part 2 + chip priv */
status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
sizeof(*status_2) + wl->fw_status_priv_len;
ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
status_1->fw_rx_counter,
status_1->drv_rx_counter,
status_1->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
(status_2->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <=
le32_to_cpu(status_2->total_released_blks)))
freed_blocks = le32_to_cpu(status_2->total_released_blks) -
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
le32_to_cpu(status_2->total_released_blks);
wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
wl->tx_allocated_blocks -= freed_blocks;
/*
 * If the FW freed some blocks:
 * If we still have allocated blocks - re-arm the timer, Tx is
 * not stuck. Otherwise, cancel the timer (no Tx currently).
 */
if (wl->tx_allocated_blocks)
wl12xx_rearm_tx_watchdog_locked(wl);
cancel_delayed_work(&wl->tx_watchdog_work);
avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
/*
 * The FW might change the total number of TX memblocks before
 * we get a notification about blocks being released. Thus, the
 * available blocks calculation might yield a temporary result
 * which is lower than the actual available blocks. Keeping in
 * mind that only blocks that were allocated can be moved from
 * TX to RX, tx_blocks_available should never decrease here.
 */
wl->tx_blocks_available = max((int)wl->tx_blocks_available,
/* if more blocks are available now, tx work can be scheduled */
if (wl->tx_blocks_available > old_tx_blk_count)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wl12xx_irq_update_links_status(wl, wlvif, status_2);
/* update the host-chipset time offset */
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
(s64)le32_to_cpu(status_2->fw_localtime);
/*
 * Drain the deferred RX and TX-status queues into mac80211.
 * Uses the _ni variants, so it must run in process context.
 */
static void wl1271_flush_deferred_work(struct wl1271 *wl)
/* Pass all received frames to the network stack */
while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
ieee80211_rx_ni(wl->hw, skb);
/* Return sent skbs to the network stack */
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item: keep flushing deferred frames until the RX queue stays
 * empty (new frames may be deferred while we flush).
 */
static void wl1271_netstack_work(struct work_struct *work)
container_of(work, struct wl1271, netstack_work);
wl1271_flush_deferred_work(wl);
} while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ-processing iterations per invocation. */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt processing loop (called with wl->mutex held).
 * Repeatedly reads the FW status, handles watchdog/data/event
 * interrupts, and services RX and TX until no interrupt bit remains or
 * the loop budget is exhausted. Returns a negative error to request
 * recovery.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
int loopcount = WL1271_IRQ_MAX_LOOPS;
unsigned int defer_count;
/*
 * In case edge triggered interrupt must be used, we cannot iterate
 * more than once without introducing race conditions with the hardirq.
 */
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state != WLCORE_STATE_ON))
ret = wl1271_ps_elp_wakeup(wl);
while (!done && loopcount--) {
/*
 * In order to avoid a race with the hardirq, clear the flag
 * before acknowledging the chip. Since the mutex is held,
 * wl1271_ps_elp_wakeup cannot be called concurrently.
 */
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
wlcore_hw_tx_immediate_compl(wl);
intr = le32_to_cpu(wl->fw_status_1->intr);
intr &= WLCORE_ALL_INTR_MASK;
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("HW watchdog interrupt received! starting recovery.");
wl->watchdog_recovery = true;
/* restarting the chip. ignore any other interrupt. */
if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
wl1271_error("SW watchdog interrupt received! "
"starting recovery.");
wl->watchdog_recovery = true;
/* restarting the chip. ignore any other interrupt. */
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
ret = wlcore_rx(wl, wl->fw_status_1);
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl1271_tx_total_queue_count(wl) > 0) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * In order to avoid starvation of the TX path,
 * call the work function directly.
 */
ret = wlcore_tx_work_locked(wl);
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* check for tx results */
ret = wlcore_hw_tx_delayed_compl(wl);
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue);
if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
ret = wl1271_event_handle(wl, 0);
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
ret = wl1271_event_handle(wl, 1);
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
wl1271_debug(DEBUG_IRQ,
"WL1271_ACX_INTR_INIT_COMPLETE");
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: marks TX as pending (so the regular tx_work is
 * skipped while we may handle TX inline), runs the locked IRQ loop,
 * queues recovery on failure, and finally re-queues tx_work if frames
 * are still waiting.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
struct wl1271 *wl = cookie;
/* TX might be handled here, avoid redundant work */
set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
ret = wlcore_irq_locked(wl);
wl12xx_queue_recovery_work(wl);
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl1271_tx_total_queue_count(wl) > 0)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_unlock(&wl->mutex);
/*
 * Accumulator for counting active interfaces; cur_vif_running is set
 * when the interface being tracked (cur_vif) is among the active ones.
 */
struct vif_counter_data {
struct ieee80211_vif *cur_vif;
bool cur_vif_running;

/* Iterator callback for ieee80211_iterate_active_interfaces(). */
static void wl12xx_vif_count_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
struct vif_counter_data *counter = data;
if (counter->cur_vif == vif)
counter->cur_vif_running = true;
/* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count currently active interfaces into *data, noting whether cur_vif
 * is one of them.
 */
static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
struct ieee80211_vif *cur_vif,
struct vif_counter_data *data)
memset(data, 0, sizeof(*data));
data->cur_vif = cur_vif;
ieee80211_iterate_active_interfaces(hw,
wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image (PLT, multi-role, or
 * single-role) into wl->fw. Skips the load if the wanted type is
 * already cached; validates that the image size is 32-bit aligned.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
const struct firmware *fw;
enum wl12xx_fw_type fw_type;
fw_type = WL12XX_FW_TYPE_PLT;
fw_name = wl->plt_fw_name;
/*
 * we can't call wl12xx_get_vif_count() here because
 * wl->mutex is taken, so use the cached last_vif_count value
 */
if (wl->last_vif_count > 1 && wl->mr_fw_name) {
fw_type = WL12XX_FW_TYPE_MULTI;
fw_name = wl->mr_fw_name;
fw_type = WL12XX_FW_TYPE_NORMAL;
fw_name = wl->sr_fw_name;
/* the wanted FW type is already loaded - nothing to do */
if (wl->fw_type == fw_type)
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
ret = request_firmware(&fw, fw_name, wl->dev);
wl1271_error("could not get firmware %s: %d", fw_name, ret);
wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the cached type until the copy below succeeds */
wl->fw_type = WL12XX_FW_TYPE_NONE;
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
wl1271_error("could not allocate memory for the firmware");
memcpy(wl->fw, fw->data, wl->fw_len);
wl->fw_type = fw_type;
release_firmware(fw);
/*
 * Schedule FW recovery: move to RESTARTING state, mask interrupts and
 * queue the recovery work. The state check avoids queuing recovery
 * recursively while one is already running.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one FW log memory block to the host-side fwlog buffer.
 * The block is a length-value list; walk it to find the end of valid
 * data, clamp to the remaining room in wl->fwlog (one PAGE_SIZE), and
 * copy.
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
/* The FW log is a length-value list, find where the log end */
while (len < maxlen) {
/* zero length terminates the list */
if (memblock[len] == 0)
/* entry would run past the block - stop */
if (len + memblock[len] + 1 > maxlen)
len += memblock[len] + 1;
/* Make sure we have enough room */
len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
wl->fwlog_size += len;
/* Sentinel address terminating the FW log chain in continuous mode. */
#define WLCORE_FW_LOG_END 0x2000000

/*
 * Dump the FW panic log: stop the logger (unless the FW is hanged),
 * read the first log block address from the FW status, then walk the
 * linked list of memory blocks copying each into the host fwlog buffer,
 * and finally wake any sysfs reader waiting on fwlog_waitq.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/* nothing to do when the FW logger is unsupported or disabled */
if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
(wl->conf.fwlog.mem_blocks == 0))
wl1271_info("Reading FW panic log");
block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
/*
 * Make sure the chip is awake and the logger isn't active.
 * Do not send a stop fwlog command if the fw is hanged.
 */
if (wl1271_ps_elp_wakeup(wl))
if (!wl->watchdog_recovery)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* continuous mode skips an extra RX descriptor at the block start */
if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
end_of_log = WLCORE_FW_LOG_END;
offset = sizeof(addr);
/* Traverse the memory blocks linked list */
memset(block, 0, WL12XX_HW_BLOCK_SIZE);
ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
/*
 * Memory blocks are linked to one another. The first 4 bytes
 * of each memory block hold the hardware address of the next
 * one. The last memory block points to the first one in
 * on demand mode and is equal to 0x2000000 in continuous mode.
 */
addr = le32_to_cpup((__le32 *)block);
if (!wl12xx_copy_fwlog(wl, block + offset,
WL12XX_HW_BLOCK_SIZE - offset))
} while (addr && (addr != end_of_log));
wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log diagnostic state for a recovery: FW version, the FW program
 * counter and the raw interrupt status, read via the BOOT partition
 * and restoring the WORK partition afterwards.
 */
static void wlcore_print_recovery(struct wl1271 *wl)
wl1271_info("Hardware recovery in progress. FW ver: %s",
wl->chip.fw_ver_str);
/* change partitions momentarily so we can read the FW pc */
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work: dump panic info, bump Tx security sequence numbers,
 * stop the queues, tear down every vif, stop the chip and ask mac80211
 * to restart the hardware. Honors the bug_on_recovery and no_recovery
 * module parameters.
 */
static void wl1271_recovery_work(struct work_struct *work)
container_of(work, struct wl1271, recovery_work);
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* unintended recovery: collect debug data before restarting */
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
wl12xx_read_fwlog_panic(wl);
wlcore_print_recovery(wl);
BUG_ON(bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
/*
 * Advance security sequence number to overcome potential progress
 * in the firmware during recovery. This doesn't hurt if the network is
 */
wl12xx_for_each_wlvif(wl, wlvif) {
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
wlvif->tx_security_seq +=
WL1271_TX_SQN_POST_RECOVERY_PADDING;
/* Prevent spurious TX during FW restart */
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
if (wl->sched_scanning) {
ieee80211_sched_scan_stopped(wl->hw);
wl->sched_scanning = false;
/* reboot the chipset */
while (!list_empty(&wl->wlvif_list)) {
wlvif = list_first_entry(&wl->wlvif_list,
struct wl12xx_vif, list);
vif = wl12xx_wlvif_to_vif(wlvif);
__wl1271_op_remove_interface(wl, vif, false);
wlcore_op_stop_locked(wl);
ieee80211_restart_hw(wl->hw);
/*
 * It's safe to enable TX now - the queues are stopped after a request
 */
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
wl->watchdog_recovery = false;
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffer (parts 1 and 2 share one allocation;
 * fw_status_2 points just past part 1) and the Tx result interface
 * buffer. Frees fw_status_1 if the second allocation fails.
 */
static int wl1271_setup(struct wl1271 *wl)
wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
sizeof(*wl->fw_status_2) +
wl->fw_status_priv_len, GFP_KERNEL);
if (!wl->fw_status_1)
/* part 2 lives immediately after part 1 in the same allocation */
wl->fw_status_2 = (struct wl_fw_status_2 *)
(((u8 *) wl->fw_status_1) +
WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
if (!wl->tx_res_if) {
kfree(wl->fw_status_1);
/*
 * Power the chip on: sleep before/after the power transition, select
 * the BOOT partition and wake the FW via ELP. Powers back off on
 * failure (error path at the bottom).
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
msleep(WL1271_PRE_POWER_ON_SLEEP);
ret = wl1271_power_on(wl);
msleep(WL1271_POWER_ON_SLEEP);
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
/* ELP module wake up */
ret = wlcore_fw_wakeup(wl);
wl1271_power_off(wl);
/*
 * Bring the chip up ready for FW boot: power on, fix up the bus block
 * size quirk, allocate status buffers and fetch the firmware image.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
ret = wl12xx_set_power_on(wl);
/*
 * For wl127x based devices we could use the default block
 * size (512 bytes), but due to a bug in the sdio driver, we
 * need to set it explicitly after the chip is powered on. To
 * simplify the code and since the performance impact is
 * negligible, we use the same block size for all different
 *
 * Check if the bus supports blocksize alignment and, if it
 * doesn't, make sure we don't have the quirk.
 */
if (!wl1271_set_block_size(wl))
wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
/* TODO: make sure the lower driver has set things up correctly */
ret = wl1271_setup(wl);
ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line test) mode: only allowed from the OFF
 * state. Boots the chip with the PLT firmware, retrying up to
 * WL1271_BOOT_RETRIES times, and publishes hw/fw versions to wiphy.
 */
int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
int retries = WL1271_BOOT_RETRIES;
struct wiphy *wiphy = wl->hw->wiphy;
static const char* const PLT_MODE[] = {
mutex_lock(&wl->mutex);
wl1271_notice("power up");
if (wl->state != WLCORE_STATE_OFF) {
wl1271_error("cannot go into PLT state because not "
"in off state: %d", wl->state);
/* Indicate to lower levels that we are now in PLT mode */
wl->plt_mode = plt_mode;
ret = wl12xx_chip_wakeup(wl, true);
ret = wl->ops->plt_init(wl);
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
/* boot attempt failed: power off and retry, or give up */
wl1271_power_off(wl);
wl->plt_mode = PLT_OFF;
wl1271_error("firmware boot in PLT mode failed despite %d retries",
WL1271_BOOT_RETRIES);
mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * first, pending work is flushed/cancelled with the mutex released,
 * then the device is powered off and state returned to OFF.
 */
int wl1271_plt_stop(struct wl1271 *wl)
wl1271_notice("power down");
/*
 * Interrupts must be disabled before setting the state to OFF.
 * Otherwise, the interrupt handler might be called and exit without
 * reading the interrupt status.
 */
wlcore_disable_interrupts(wl);
mutex_lock(&wl->mutex);
mutex_unlock(&wl->mutex);
/*
 * This will not necessarily enable interrupts as interrupts
 * may have been disabled when op_stop was called. It will,
 * however, balance the above call to disable_interrupts().
 */
wlcore_enable_interrupts(wl);
wl1271_error("cannot power down because not in PLT "
"state: %d", wl->state);
mutex_unlock(&wl->mutex);
/* drain deferred frames and stop all pending work before power-off */
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->state = WLCORE_STATE_OFF;
wl->plt_mode = PLT_OFF;
mutex_unlock(&wl->mutex);
/*
 * mac80211 Tx entry point. Maps the skb to a queue and FW link (hlid),
 * drops it when the link is invalid or the queue is hard-stopped,
 * otherwise enqueues it on the per-link queue, applies the high
 * watermark (soft-stop), and kicks tx_work unless the IRQ path is
 * already handling Tx.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
struct wl1271 *wl = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
spin_lock_irqsave(&wl->wl_lock, flags);
/*
 * drop the packet if the link is invalid or the queue is stopped
 * for any reason but watermark. Watermark is a "soft"-stop so we
 * allow these packets through.
 */
if (hlid == WL12XX_INVALID_LINK_ID ||
(wlvif && !test_bit(hlid, wlvif->links_map)) ||
(wlcore_is_queue_stopped(wl, q) &&
!wlcore_is_queue_stopped_by_reason(wl, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
/*
 * The workqueue is slow to process the tx_queue and we need stop
 * the queue here, otherwise the queue will get too long.
 */
if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
!wlcore_is_queue_stopped_by_reason(wl, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
wlcore_stop_queue_locked(wl, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
/*
 * The chip specific setup must run before the first TX packet -
 * before that, the tx_work will not be initialized!
 */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet for transmission (the FW
 * requests it when low on RX memory blocks). No-op when one is
 * already pending; sends immediately when the FW Tx path is free.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
unsigned long flags;
/* no need to queue a new dummy packet if one is already pending */
if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* The FW is low on RX memory blocks, so send the dummy packet asap */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
return wlcore_tx_work_locked(wl);
/*
 * If the FW TX is busy, TX work will be scheduled by the threaded
 * interrupt handler function
 */
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512 bytes
 * boundaries could be beneficial, performance wise
 */
#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))

/*
 * Build the dummy (NULL-function data) packet: zeroed 3-address header
 * with ToDS set, zero payload padding to the aligned total size, and
 * management TID so the FW treats it appropriately.
 */
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
struct sk_buff *skb;
struct ieee80211_hdr_3addr *hdr;
unsigned int dummy_packet_size;
/* payload = total - HW descriptor - 802.11 header */
dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
wl1271_warning("Failed to allocate a dummy packet skb");
/* leave headroom for the HW Tx descriptor */
skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
/* Dummy packets require the TID to be management */
skb->priority = WL1271_TID_MGMT;
/* Initialize all fields that might be used */
skb_set_queue_mapping(skb, 0);
memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN trigger pattern can be expressed as FW RX
 * filter fields: counts the contiguous masked segments (a segment
 * crossing the Ethernet/IP boundary counts twice) and checks both the
 * field count and total fields-buffer size against FW limits.
 */
wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
int num_fields = 0, in_field = 0, fields_size = 0;
int i, pattern_len = 0;
wl1271_warning("No mask in WoWLAN pattern");
/*
 * The pattern is broken up into segments of bytes at different offsets
 * that need to be checked by the FW filter. Each segment is called
 * a field in the FW API. We verify that the total number of fields
 * required for this pattern won't exceed FW limits (8)
 * as well as the total fields buffer won't exceed the FW limit.
 * Note that if there's a pattern which crosses Ethernet/IP header
 * boundary a new field is required.
 */
for (i = 0; i < p->pattern_len; i++) {
if (test_bit(i, (unsigned long *)p->mask)) {
if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
fields_size += pattern_len +
RX_FILTER_FIELD_OVERHEAD;
fields_size += pattern_len +
RX_FILTER_FIELD_OVERHEAD;
fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
wl1271_warning("RX Filter too complex. Too many segments");
if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
wl1271_warning("RX filter pattern is too big");
1358 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1360 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Free an RX filter: releases each field's pattern buffer.
 * NOTE(review): the trailing kfree(filter) line is not visible in this
 * extract - confirm the filter itself is freed here.
 */
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
for (i = 0; i < filter->num_fields; i++)
kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter. Fails when
 * the filter already holds WL1271_RX_FILTER_MAX_FIELDS fields or the
 * pattern buffer cannot be allocated. The pattern bytes are copied.
 */
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
u16 offset, u8 flags,
u8 *pattern, u8 len)
struct wl12xx_rx_filter_field *field;
if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
wl1271_warning("Max fields per RX filter. can't alloc another");
field = &filter->fields[filter->num_fields];
field->pattern = kzalloc(len, GFP_KERNEL);
if (!field->pattern) {
wl1271_warning("Failed to allocate RX filter pattern");
filter->num_fields++;
field->offset = cpu_to_le16(offset);
field->flags = flags;
memcpy(field->pattern, pattern, len);
1405 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1407 int i, fields_size = 0;
1409 for (i = 0; i < filter->num_fields; i++)
1410 fields_size += filter->fields[i].len +
1411 sizeof(struct wl12xx_rx_filter_field) -
1417 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1421 struct wl12xx_rx_filter_field *field;
1423 for (i = 0; i < filter->num_fields; i++) {
1424 field = (struct wl12xx_rx_filter_field *)buf;
1426 field->offset = filter->fields[i].offset;
1427 field->flags = filter->fields[i].flags;
1428 field->len = filter->fields[i].len;
1430 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1431 buf += sizeof(struct wl12xx_rx_filter_field) -
1432 sizeof(u8 *) + field->len;
1437 * Allocates an RX filter returned through f
1438 * which needs to be freed using rx_filter_free()
1440 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1441 struct cfg80211_wowlan_trig_pkt_pattern *p,
1442 struct wl12xx_rx_filter **f)
1445 struct wl12xx_rx_filter *filter;
1449 filter = wl1271_rx_filter_alloc();
1451 wl1271_warning("Failed to alloc rx filter");
1457 while (i < p->pattern_len) {
1458 if (!test_bit(i, (unsigned long *)p->mask)) {
1463 for (j = i; j < p->pattern_len; j++) {
1464 if (!test_bit(j, (unsigned long *)p->mask))
1467 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1468 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1472 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1474 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1476 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1477 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1482 ret = wl1271_rx_filter_alloc_field(filter,
1485 &p->pattern[i], len);
1492 filter->action = FILTER_SIGNAL;
1498 wl1271_rx_filter_free(filter);
1504 static int wl1271_configure_wowlan(struct wl1271 *wl,
1505 struct cfg80211_wowlan *wow)
1509 if (!wow || wow->any || !wow->n_patterns) {
1510 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1515 ret = wl1271_rx_filter_clear_all(wl);
1522 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1525 /* Validate all incoming patterns before clearing current FW state */
1526 for (i = 0; i < wow->n_patterns; i++) {
1527 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1529 wl1271_warning("Bad wowlan pattern %d", i);
1534 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1538 ret = wl1271_rx_filter_clear_all(wl);
1542 /* Translate WoWLAN patterns into filters */
1543 for (i = 0; i < wow->n_patterns; i++) {
1544 struct cfg80211_wowlan_trig_pkt_pattern *p;
1545 struct wl12xx_rx_filter *filter = NULL;
1547 p = &wow->patterns[i];
1549 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1551 wl1271_warning("Failed to create an RX filter from "
1552 "wowlan pattern %d", i);
1556 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1558 wl1271_rx_filter_free(filter);
1563 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1569 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1570 struct wl12xx_vif *wlvif,
1571 struct cfg80211_wowlan *wow)
1575 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1578 ret = wl1271_ps_elp_wakeup(wl);
1582 ret = wl1271_configure_wowlan(wl, wow);
1586 if ((wl->conf.conn.suspend_wake_up_event ==
1587 wl->conf.conn.wake_up_event) &&
1588 (wl->conf.conn.suspend_listen_interval ==
1589 wl->conf.conn.listen_interval))
1592 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1593 wl->conf.conn.suspend_wake_up_event,
1594 wl->conf.conn.suspend_listen_interval);
1597 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1600 wl1271_ps_elp_sleep(wl);
1606 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1607 struct wl12xx_vif *wlvif)
1611 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1614 ret = wl1271_ps_elp_wakeup(wl);
1618 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1620 wl1271_ps_elp_sleep(wl);
1626 static int wl1271_configure_suspend(struct wl1271 *wl,
1627 struct wl12xx_vif *wlvif,
1628 struct cfg80211_wowlan *wow)
1630 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1631 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1632 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1633 return wl1271_configure_suspend_ap(wl, wlvif);
1637 static void wl1271_configure_resume(struct wl1271 *wl,
1638 struct wl12xx_vif *wlvif)
1641 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1642 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1644 if ((!is_ap) && (!is_sta))
1647 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1650 ret = wl1271_ps_elp_wakeup(wl);
1655 wl1271_configure_wowlan(wl, NULL);
1657 if ((wl->conf.conn.suspend_wake_up_event ==
1658 wl->conf.conn.wake_up_event) &&
1659 (wl->conf.conn.suspend_listen_interval ==
1660 wl->conf.conn.listen_interval))
1663 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1664 wl->conf.conn.wake_up_event,
1665 wl->conf.conn.listen_interval);
1668 wl1271_error("resume: wake up conditions failed: %d",
1672 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1676 wl1271_ps_elp_sleep(wl);
1679 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1680 struct cfg80211_wowlan *wow)
1682 struct wl1271 *wl = hw->priv;
1683 struct wl12xx_vif *wlvif;
1686 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1689 /* we want to perform the recovery before suspending */
1690 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1691 wl1271_warning("postponing suspend to perform recovery");
1695 wl1271_tx_flush(wl);
1697 mutex_lock(&wl->mutex);
1698 wl->wow_enabled = true;
1699 wl12xx_for_each_wlvif(wl, wlvif) {
1700 ret = wl1271_configure_suspend(wl, wlvif, wow);
1702 mutex_unlock(&wl->mutex);
1703 wl1271_warning("couldn't prepare device to suspend");
1707 mutex_unlock(&wl->mutex);
1708 /* flush any remaining work */
1709 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1712 * disable and re-enable interrupts in order to flush
1715 wlcore_disable_interrupts(wl);
1718 * set suspended flag to avoid triggering a new threaded_irq
1719 * work. no need for spinlock as interrupts are disabled.
1721 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1723 wlcore_enable_interrupts(wl);
1724 flush_work(&wl->tx_work);
1725 flush_delayed_work(&wl->elp_work);
1730 static int wl1271_op_resume(struct ieee80211_hw *hw)
1732 struct wl1271 *wl = hw->priv;
1733 struct wl12xx_vif *wlvif;
1734 unsigned long flags;
1735 bool run_irq_work = false, pending_recovery;
1738 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1740 WARN_ON(!wl->wow_enabled);
1743 * re-enable irq_work enqueuing, and call irq_work directly if
1744 * there is a pending work.
1746 spin_lock_irqsave(&wl->wl_lock, flags);
1747 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1748 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1749 run_irq_work = true;
1750 spin_unlock_irqrestore(&wl->wl_lock, flags);
1752 mutex_lock(&wl->mutex);
1754 /* test the recovery flag before calling any SDIO functions */
1755 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1759 wl1271_debug(DEBUG_MAC80211,
1760 "run postponed irq_work directly");
1762 /* don't talk to the HW if recovery is pending */
1763 if (!pending_recovery) {
1764 ret = wlcore_irq_locked(wl);
1766 wl12xx_queue_recovery_work(wl);
1769 wlcore_enable_interrupts(wl);
1772 if (pending_recovery) {
1773 wl1271_warning("queuing forgotten recovery on resume");
1774 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1778 wl12xx_for_each_wlvif(wl, wlvif) {
1779 wl1271_configure_resume(wl, wlvif);
1783 wl->wow_enabled = false;
1784 mutex_unlock(&wl->mutex);
1790 static int wl1271_op_start(struct ieee80211_hw *hw)
1792 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1795 * We have to delay the booting of the hardware because
1796 * we need to know the local MAC address before downloading and
1797 * initializing the firmware. The MAC address cannot be changed
1798 * after boot, and without the proper MAC address, the firmware
1799 * will not function properly.
1801 * The MAC address is first known when the corresponding interface
1802 * is added. That is where we will initialize the hardware.
1808 static void wlcore_op_stop_locked(struct wl1271 *wl)
1812 if (wl->state == WLCORE_STATE_OFF) {
1813 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1815 wlcore_enable_interrupts(wl);
1821 * this must be before the cancel_work calls below, so that the work
1822 * functions don't perform further work.
1824 wl->state = WLCORE_STATE_OFF;
1827 * Use the nosync variant to disable interrupts, so the mutex could be
1828 * held while doing so without deadlocking.
1830 wlcore_disable_interrupts_nosync(wl);
1832 mutex_unlock(&wl->mutex);
1834 wlcore_synchronize_interrupts(wl);
1835 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1836 cancel_work_sync(&wl->recovery_work);
1837 wl1271_flush_deferred_work(wl);
1838 cancel_delayed_work_sync(&wl->scan_complete_work);
1839 cancel_work_sync(&wl->netstack_work);
1840 cancel_work_sync(&wl->tx_work);
1841 cancel_delayed_work_sync(&wl->elp_work);
1842 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1844 /* let's notify MAC80211 about the remaining pending TX frames */
1845 wl12xx_tx_reset(wl);
1846 mutex_lock(&wl->mutex);
1848 wl1271_power_off(wl);
1850 * In case a recovery was scheduled, interrupts were disabled to avoid
1851 * an interrupt storm. Now that the power is down, it is safe to
1852 * re-enable interrupts to balance the disable depth
1854 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1855 wlcore_enable_interrupts(wl);
1857 wl->band = IEEE80211_BAND_2GHZ;
1860 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1861 wl->channel_type = NL80211_CHAN_NO_HT;
1862 wl->tx_blocks_available = 0;
1863 wl->tx_allocated_blocks = 0;
1864 wl->tx_results_count = 0;
1865 wl->tx_packets_count = 0;
1866 wl->time_offset = 0;
1867 wl->ap_fw_ps_map = 0;
1869 wl->sched_scanning = false;
1870 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1871 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1872 memset(wl->links_map, 0, sizeof(wl->links_map));
1873 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1874 wl->active_sta_count = 0;
1876 /* The system link is always allocated */
1877 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1880 * this is performed after the cancel_work calls and the associated
1881 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1882 * get executed before all these vars have been reset.
1886 wl->tx_blocks_freed = 0;
1888 for (i = 0; i < NUM_TX_QUEUES; i++) {
1889 wl->tx_pkts_freed[i] = 0;
1890 wl->tx_allocated_pkts[i] = 0;
1893 wl1271_debugfs_reset(wl);
1895 kfree(wl->fw_status_1);
1896 wl->fw_status_1 = NULL;
1897 wl->fw_status_2 = NULL;
1898 kfree(wl->tx_res_if);
1899 wl->tx_res_if = NULL;
1900 kfree(wl->target_mem_map);
1901 wl->target_mem_map = NULL;
1904 static void wlcore_op_stop(struct ieee80211_hw *hw)
1906 struct wl1271 *wl = hw->priv;
1908 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1910 mutex_lock(&wl->mutex);
1912 wlcore_op_stop_locked(wl);
1914 mutex_unlock(&wl->mutex);
1917 static void wlcore_channel_switch_work(struct work_struct *work)
1919 struct delayed_work *dwork;
1921 struct ieee80211_vif *vif;
1922 struct wl12xx_vif *wlvif;
1925 dwork = container_of(work, struct delayed_work, work);
1926 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1929 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1931 mutex_lock(&wl->mutex);
1933 if (unlikely(wl->state != WLCORE_STATE_ON))
1936 /* check the channel switch is still ongoing */
1937 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1940 vif = wl12xx_wlvif_to_vif(wlvif);
1941 ieee80211_chswitch_done(vif, false);
1943 ret = wl1271_ps_elp_wakeup(wl);
1947 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1949 wl1271_ps_elp_sleep(wl);
1951 mutex_unlock(&wl->mutex);
1954 static void wlcore_connection_loss_work(struct work_struct *work)
1956 struct delayed_work *dwork;
1958 struct ieee80211_vif *vif;
1959 struct wl12xx_vif *wlvif;
1961 dwork = container_of(work, struct delayed_work, work);
1962 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1965 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1967 mutex_lock(&wl->mutex);
1969 if (unlikely(wl->state != WLCORE_STATE_ON))
1972 /* Call mac80211 connection loss */
1973 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1976 vif = wl12xx_wlvif_to_vif(wlvif);
1977 ieee80211_connection_loss(vif);
1979 mutex_unlock(&wl->mutex);
1982 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1984 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1985 WL12XX_MAX_RATE_POLICIES);
1986 if (policy >= WL12XX_MAX_RATE_POLICIES)
1989 __set_bit(policy, wl->rate_policies_map);
1994 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1996 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1999 __clear_bit(*idx, wl->rate_policies_map);
2000 *idx = WL12XX_MAX_RATE_POLICIES;
2003 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2005 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2006 WLCORE_MAX_KLV_TEMPLATES);
2007 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2010 __set_bit(policy, wl->klv_templates_map);
2015 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2017 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2020 __clear_bit(*idx, wl->klv_templates_map);
2021 *idx = WLCORE_MAX_KLV_TEMPLATES;
2024 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2026 switch (wlvif->bss_type) {
2027 case BSS_TYPE_AP_BSS:
2029 return WL1271_ROLE_P2P_GO;
2031 return WL1271_ROLE_AP;
2033 case BSS_TYPE_STA_BSS:
2035 return WL1271_ROLE_P2P_CL;
2037 return WL1271_ROLE_STA;
2040 return WL1271_ROLE_IBSS;
2043 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2045 return WL12XX_INVALID_ROLE_TYPE;
2048 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2050 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2053 /* clear everything but the persistent data */
2054 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2056 switch (ieee80211_vif_type_p2p(vif)) {
2057 case NL80211_IFTYPE_P2P_CLIENT:
2060 case NL80211_IFTYPE_STATION:
2061 wlvif->bss_type = BSS_TYPE_STA_BSS;
2063 case NL80211_IFTYPE_ADHOC:
2064 wlvif->bss_type = BSS_TYPE_IBSS;
2066 case NL80211_IFTYPE_P2P_GO:
2069 case NL80211_IFTYPE_AP:
2070 wlvif->bss_type = BSS_TYPE_AP_BSS;
2073 wlvif->bss_type = MAX_BSS_TYPE;
2077 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2078 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2079 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2081 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2082 wlvif->bss_type == BSS_TYPE_IBSS) {
2083 /* init sta/ibss data */
2084 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2085 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2086 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2087 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2088 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2089 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2090 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2091 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2094 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2095 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2096 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2097 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2098 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2099 wl12xx_allocate_rate_policy(wl,
2100 &wlvif->ap.ucast_rate_idx[i]);
2101 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2103 * TODO: check if basic_rate shouldn't be
2104 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2105 * instead (the same thing for STA above).
2107 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2108 /* TODO: this seems to be used only for STA, check it */
2109 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2112 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2113 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2114 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2117 * mac80211 configures some values globally, while we treat them
2118 * per-interface. thus, on init, we have to copy them from wl
2120 wlvif->band = wl->band;
2121 wlvif->channel = wl->channel;
2122 wlvif->power_level = wl->power_level;
2123 wlvif->channel_type = wl->channel_type;
2125 INIT_WORK(&wlvif->rx_streaming_enable_work,
2126 wl1271_rx_streaming_enable_work);
2127 INIT_WORK(&wlvif->rx_streaming_disable_work,
2128 wl1271_rx_streaming_disable_work);
2129 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2130 wlcore_channel_switch_work);
2131 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2132 wlcore_connection_loss_work);
2133 INIT_LIST_HEAD(&wlvif->list);
2135 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2136 (unsigned long) wlvif);
2140 static bool wl12xx_init_fw(struct wl1271 *wl)
2142 int retries = WL1271_BOOT_RETRIES;
2143 bool booted = false;
2144 struct wiphy *wiphy = wl->hw->wiphy;
2149 ret = wl12xx_chip_wakeup(wl, false);
2153 ret = wl->ops->boot(wl);
2157 ret = wl1271_hw_init(wl);
2165 mutex_unlock(&wl->mutex);
2166 /* Unlocking the mutex in the middle of handling is
2167 inherently unsafe. In this case we deem it safe to do,
2168 because we need to let any possibly pending IRQ out of
2169 the system (and while we are WLCORE_STATE_OFF the IRQ
2170 work function will not do anything.) Also, any other
2171 possible concurrent operations will fail due to the
2172 current state, hence the wl1271 struct should be safe. */
2173 wlcore_disable_interrupts(wl);
2174 wl1271_flush_deferred_work(wl);
2175 cancel_work_sync(&wl->netstack_work);
2176 mutex_lock(&wl->mutex);
2178 wl1271_power_off(wl);
2182 wl1271_error("firmware boot failed despite %d retries",
2183 WL1271_BOOT_RETRIES);
2187 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2189 /* update hw/fw version info in wiphy struct */
2190 wiphy->hw_version = wl->chip.id;
2191 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2192 sizeof(wiphy->fw_version));
2195 * Now we know if 11a is supported (info from the NVS), so disable
2196 * 11a channels if not supported
2198 if (!wl->enable_11a)
2199 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2201 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2202 wl->enable_11a ? "" : "not ");
2204 wl->state = WLCORE_STATE_ON;
2209 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2211 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2215 * Check whether a fw switch (i.e. moving from one loaded
2216 * fw to another) is needed. This function is also responsible
2217 * for updating wl->last_vif_count, so it must be called before
2218 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2221 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2222 struct vif_counter_data vif_counter_data,
2225 enum wl12xx_fw_type current_fw = wl->fw_type;
2226 u8 vif_count = vif_counter_data.counter;
2228 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2231 /* increase the vif count if this is a new vif */
2232 if (add && !vif_counter_data.cur_vif_running)
2235 wl->last_vif_count = vif_count;
2237 /* no need for fw change if the device is OFF */
2238 if (wl->state == WLCORE_STATE_OFF)
2241 /* no need for fw change if a single fw is used */
2242 if (!wl->mr_fw_name)
2245 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2247 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2254 * Enter "forced psm". Make sure the sta is in psm against the ap,
2255 * to make the fw switch a bit more disconnection-persistent.
2257 static void wl12xx_force_active_psm(struct wl1271 *wl)
2259 struct wl12xx_vif *wlvif;
2261 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2262 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2266 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2267 struct ieee80211_vif *vif)
2269 struct wl1271 *wl = hw->priv;
2270 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2271 struct vif_counter_data vif_count;
2274 bool booted = false;
2276 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2277 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2279 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2280 ieee80211_vif_type_p2p(vif), vif->addr);
2282 wl12xx_get_vif_count(hw, vif, &vif_count);
2284 mutex_lock(&wl->mutex);
2285 ret = wl1271_ps_elp_wakeup(wl);
2290 * in some very corner case HW recovery scenarios its possible to
2291 * get here before __wl1271_op_remove_interface is complete, so
2292 * opt out if that is the case.
2294 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2295 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2301 ret = wl12xx_init_vif_data(wl, vif);
2306 role_type = wl12xx_get_role_type(wl, wlvif);
2307 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2312 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2313 wl12xx_force_active_psm(wl);
2314 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2315 mutex_unlock(&wl->mutex);
2316 wl1271_recovery_work(&wl->recovery_work);
2321 * TODO: after the nvs issue will be solved, move this block
2322 * to start(), and make sure here the driver is ON.
2324 if (wl->state == WLCORE_STATE_OFF) {
2326 * we still need this in order to configure the fw
2327 * while uploading the nvs
2329 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2331 booted = wl12xx_init_fw(wl);
2338 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2339 role_type, &wlvif->role_id);
2343 ret = wl1271_init_vif_specific(wl, vif);
2347 list_add(&wlvif->list, &wl->wlvif_list);
2348 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2350 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2355 wl1271_ps_elp_sleep(wl);
2357 mutex_unlock(&wl->mutex);
2362 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2363 struct ieee80211_vif *vif,
2364 bool reset_tx_queues)
2366 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2368 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2370 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2372 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2375 /* because of hardware recovery, we may get here twice */
2376 if (wl->state == WLCORE_STATE_OFF)
2379 wl1271_info("down");
2381 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2382 wl->scan_wlvif == wlvif) {
2384 * Rearm the tx watchdog just before idling scan. This
2385 * prevents just-finished scans from triggering the watchdog
2387 wl12xx_rearm_tx_watchdog_locked(wl);
2389 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2390 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2391 wl->scan_wlvif = NULL;
2392 wl->scan.req = NULL;
2393 ieee80211_scan_completed(wl->hw, true);
2396 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2397 /* disable active roles */
2398 ret = wl1271_ps_elp_wakeup(wl);
2402 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2403 wlvif->bss_type == BSS_TYPE_IBSS) {
2404 if (wl12xx_dev_role_started(wlvif))
2405 wl12xx_stop_dev(wl, wlvif);
2408 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2412 wl1271_ps_elp_sleep(wl);
2415 /* clear all hlids (except system_hlid) */
2416 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2418 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2419 wlvif->bss_type == BSS_TYPE_IBSS) {
2420 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2421 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2422 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2423 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2424 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2426 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2427 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2428 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2429 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2430 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2431 wl12xx_free_rate_policy(wl,
2432 &wlvif->ap.ucast_rate_idx[i]);
2433 wl1271_free_ap_keys(wl, wlvif);
2436 dev_kfree_skb(wlvif->probereq);
2437 wlvif->probereq = NULL;
2438 wl12xx_tx_reset_wlvif(wl, wlvif);
2439 if (wl->last_wlvif == wlvif)
2440 wl->last_wlvif = NULL;
2441 list_del(&wlvif->list);
2442 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2443 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2444 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2452 * Last AP, have more stations. Configure sleep auth according to STA.
2453 * Don't do thin on unintended recovery.
2455 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2456 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2459 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2460 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2461 /* Configure for power according to debugfs */
2462 if (sta_auth != WL1271_PSM_ILLEGAL)
2463 wl1271_acx_sleep_auth(wl, sta_auth);
2464 /* Configure for power always on */
2465 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2466 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2467 /* Configure for ELP power saving */
2469 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2473 mutex_unlock(&wl->mutex);
2475 del_timer_sync(&wlvif->rx_streaming_timer);
2476 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2477 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2478 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2480 mutex_lock(&wl->mutex);
2483 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2484 struct ieee80211_vif *vif)
2486 struct wl1271 *wl = hw->priv;
2487 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2488 struct wl12xx_vif *iter;
2489 struct vif_counter_data vif_count;
2491 wl12xx_get_vif_count(hw, vif, &vif_count);
2492 mutex_lock(&wl->mutex);
2494 if (wl->state == WLCORE_STATE_OFF ||
2495 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2499 * wl->vif can be null here if someone shuts down the interface
2500 * just when hardware recovery has been started.
2502 wl12xx_for_each_wlvif(wl, iter) {
2506 __wl1271_op_remove_interface(wl, vif, true);
2509 WARN_ON(iter != wlvif);
2510 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2511 wl12xx_force_active_psm(wl);
2512 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2513 wl12xx_queue_recovery_work(wl);
2516 mutex_unlock(&wl->mutex);
2519 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2520 struct ieee80211_vif *vif,
2521 enum nl80211_iftype new_type, bool p2p)
2523 struct wl1271 *wl = hw->priv;
2526 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2527 wl1271_op_remove_interface(hw, vif);
2529 vif->type = new_type;
2531 ret = wl1271_op_add_interface(hw, vif);
2533 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2537 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2540 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2543 * One of the side effects of the JOIN command is that is clears
2544 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2545 * to a WPA/WPA2 access point will therefore kill the data-path.
2546 * Currently the only valid scenario for JOIN during association
2547 * is on roaming, in which case we will also be given new keys.
2548 * Keep the below message for now, unless it starts bothering
2549 * users who really like to roam a lot :)
2551 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2552 wl1271_info("JOIN while associated.");
2554 /* clear encryption type */
2555 wlvif->encryption_type = KEY_NONE;
2558 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2560 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2562 * TODO: this is an ugly workaround for wl12xx fw
2563 * bug - we are not able to tx/rx after the first
2564 * start_sta, so make dummy start+stop calls,
2565 * and then call start_sta again.
2566 * this should be fixed in the fw.
2568 wl12xx_cmd_role_start_sta(wl, wlvif);
2569 wl12xx_cmd_role_stop_sta(wl, wlvif);
2572 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2578 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2582 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2586 wl1271_error("No SSID in IEs!");
2591 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2592 wl1271_error("SSID is too long!");
2596 wlvif->ssid_len = ssid_len;
2597 memcpy(wlvif->ssid, ptr+2, ssid_len);
2601 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2603 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2604 struct sk_buff *skb;
2607 /* we currently only support setting the ssid from the ap probe req */
2608 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2611 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2615 ieoffset = offsetof(struct ieee80211_mgmt,
2616 u.probe_req.variable);
2617 wl1271_ssid_set(wlvif, skb, ieoffset);
2623 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2624 struct ieee80211_bss_conf *bss_conf,
2630 wlvif->aid = bss_conf->aid;
2631 wlvif->channel_type = bss_conf->channel_type;
2632 wlvif->beacon_int = bss_conf->beacon_int;
2633 wlvif->wmm_enabled = bss_conf->qos;
2635 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2638 * with wl1271, we don't need to update the
2639 * beacon_int and dtim_period, because the firmware
2640 * updates it by itself when the first beacon is
2641 * received after a join.
2643 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2648 * Get a template for hardware connection maintenance
2650 dev_kfree_skb(wlvif->probereq);
2651 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2654 ieoffset = offsetof(struct ieee80211_mgmt,
2655 u.probe_req.variable);
2656 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2658 /* enable the connection monitoring feature */
2659 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2664 * The join command disable the keep-alive mode, shut down its process,
2665 * and also clear the template config, so we need to reset it all after
2666 * the join. The acx_aid starts the keep-alive process, and the order
2667 * of the commands below is relevant.
2669 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2673 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2677 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2681 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2682 wlvif->sta.klv_template_id,
2683 ACX_KEEP_ALIVE_TPL_VALID);
2688 * The default fw psm configuration is AUTO, while mac80211 default
2689 * setting is off (ACTIVE), so sync the fw with the correct value.
2691 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2697 wl1271_tx_enabled_rates_get(wl,
2700 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2708 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2711 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2713 /* make sure we are connected (sta) joined */
2715 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2718 /* make sure we are joined (ibss) */
2720 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2724 /* use defaults when not associated */
2727 /* free probe-request template */
2728 dev_kfree_skb(wlvif->probereq);
2729 wlvif->probereq = NULL;
2731 /* disable connection monitor features */
2732 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2736 /* Disable the keep-alive feature */
2737 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2742 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2743 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2745 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2746 ieee80211_chswitch_done(vif, false);
2747 cancel_delayed_work(&wlvif->channel_switch_work);
2750 /* invalidate keep-alive template */
2751 wl1271_acx_keep_alive_config(wl, wlvif,
2752 wlvif->sta.klv_template_id,
2753 ACX_KEEP_ALIVE_TPL_INVALID);
2755 /* reset TX security counters on a clean disconnect */
2756 wlvif->tx_security_last_seq_lsb = 0;
2757 wlvif->tx_security_seq = 0;
/*
 * Reset the vif's basic and current rate sets to the per-band defaults
 * stored in wlvif->bitrate_masks for the vif's current band.
 */
2762 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2764 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2765 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Apply a mac80211 config change to a single vif: sync power-save mode
 * (STA roles only) and TX power level with the new configuration.
 * NOTE(review): elided listing — return statements and some error paths
 * between the numbered lines are not shown.
 */
2768 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2769 struct ieee80211_conf *conf, u32 changed)
2771 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
/* PS changes apply only to non-AP roles. */
2774 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
/* Enter PS only when associated and not already in PS. */
2776 if ((conf->flags & IEEE80211_CONF_PS) &&
2777 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2778 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* forced_ps config selects full PS instead of FW-managed auto PS */
2783 if (wl->conf.conn.forced_ps) {
2784 ps_mode = STATION_POWER_SAVE_MODE;
2785 ps_mode_str = "forced";
2787 ps_mode = STATION_AUTO_PS_MODE;
2788 ps_mode_str = "auto";
2791 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2793 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2796 wl1271_warning("enter %s ps failed %d",
/* Leaving PS: mac80211 cleared CONF_PS while we are still in PS. */
2799 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2800 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2802 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2804 ret = wl1271_ps_set_mode(wl, wlvif,
2805 STATION_ACTIVE_MODE);
2807 wl1271_warning("exit auto ps failed %d", ret);
/* Push the new TX power level to FW only when it actually changed. */
2811 if (conf->power_level != wlvif->power_level) {
2812 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2816 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback: record the global power level, wake the chip
 * from ELP and apply the config change to every active vif.
 */
2822 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2824 struct wl1271 *wl = hw->priv;
2825 struct wl12xx_vif *wlvif;
2826 struct ieee80211_conf *conf = &hw->conf;
2829 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2831 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2833 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2836 mutex_lock(&wl->mutex);
/* Remember the requested power level even if the chip is off. */
2838 if (changed & IEEE80211_CONF_CHANGE_POWER)
2839 wl->power_level = conf->power_level;
2841 if (unlikely(wl->state != WLCORE_STATE_ON))
2844 ret = wl1271_ps_elp_wakeup(wl);
2848 /* configure each interface */
2849 wl12xx_for_each_wlvif(wl, wlvif) {
2850 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2856 wl1271_ps_elp_sleep(wl);
2859 mutex_unlock(&wl->mutex);
/*
 * Snapshot of the multicast address list built in prepare_multicast and
 * consumed by configure_filter.
 * NOTE(review): elided listing — the `enabled` and `mc_list_length`
 * members referenced by the filter callbacks are not shown here.
 */
2864 struct wl1271_filter_params {
2867 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback: copy the multicast list into a
 * heap-allocated wl1271_filter_params and smuggle the pointer to
 * configure_filter through the u64 multicast cookie.
 * Called in atomic context, hence GFP_ATOMIC.
 */
2870 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2871 struct netdev_hw_addr_list *mc_list)
2873 struct wl1271_filter_params *fp;
2874 struct netdev_hw_addr *ha;
2876 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2878 wl1271_error("Out of memory setting filters.");
2882 /* update multicast filtering parameters */
2883 fp->mc_list_length = 0;
/* Too many addresses for the HW table: disable HW mc filtering. */
2884 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2885 fp->enabled = false;
2888 netdev_hw_addr_list_for_each(ha, mc_list) {
2889 memcpy(fp->mc_list[fp->mc_list_length],
2890 ha->addr, ETH_ALEN);
2891 fp->mc_list_length++;
/* The pointer is returned as an opaque cookie; freed by configure_filter. */
2895 return (u64)(unsigned long)fp;
/*
 * RX filter flags this driver honors in configure_filter.
 * NOTE(review): elided listing — additional FIF_* flags in the OR chain
 * are not shown here.
 */
2898 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2901 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback: mask the requested filters down to
 * what we support, then program the multicast group-address table for each
 * non-AP vif from the params built in prepare_multicast.
 * NOTE(review): elided listing — the kfree of `fp` and the error paths are
 * not shown.
 */
2905 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2906 unsigned int changed,
2907 unsigned int *total, u64 multicast)
/* Recover the filter params pointer from the opaque multicast cookie. */
2909 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2910 struct wl1271 *wl = hw->priv;
2911 struct wl12xx_vif *wlvif;
2915 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2916 " total %x", changed, *total);
2918 mutex_lock(&wl->mutex);
2920 *total &= WL1271_SUPPORTED_FILTERS;
2921 changed &= WL1271_SUPPORTED_FILTERS;
2923 if (unlikely(wl->state != WLCORE_STATE_ON))
2926 ret = wl1271_ps_elp_wakeup(wl);
2930 wl12xx_for_each_wlvif(wl, wlvif) {
2931 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* ALLMULTI overrides the explicit address table. */
2932 if (*total & FIF_ALLMULTI)
2933 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2937 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2940 fp->mc_list_length);
2947 * the fw doesn't provide an api to configure the filters. instead,
2948 * the filters configuration is based on the active roles / ROC
2953 wl1271_ps_elp_sleep(wl);
2956 mutex_unlock(&wl->mutex);
/*
 * Record an AP key set before the AP role has started; the recorded keys
 * are replayed to the FW by wl1271_ap_init_hwenc once the AP starts.
 * Rejects oversized keys, duplicate key ids and a full table.
 */
2960 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2961 u8 id, u8 key_type, u8 key_size,
2962 const u8 *key, u8 hlid, u32 tx_seq_32,
2965 struct wl1271_ap_key *ap_key;
2968 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2970 if (key_size > MAX_KEY_SIZE)
2974 * Find next free entry in ap_keys. Also check we are not replacing
2977 for (i = 0; i < MAX_NUM_KEYS; i++) {
2978 if (wlvif->ap.recorded_keys[i] == NULL)
2981 if (wlvif->ap.recorded_keys[i]->id == id) {
2982 wl1271_warning("trying to record key replacement");
2987 if (i == MAX_NUM_KEYS)
2990 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
/* Copy the key material and sequence counters into the record. */
2995 ap_key->key_type = key_type;
2996 ap_key->key_size = key_size;
2997 memcpy(ap_key->key, key, key_size);
2998 ap_key->hlid = hlid;
2999 ap_key->tx_seq_32 = tx_seq_32;
3000 ap_key->tx_seq_16 = tx_seq_16;
3002 wlvif->ap.recorded_keys[i] = ap_key;
/*
 * Free all recorded AP keys and clear the table entries so stale pointers
 * are never replayed or double-freed.
 */
3006 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3010 for (i = 0; i < MAX_NUM_KEYS; i++) {
3011 kfree(wlvif->ap.recorded_keys[i]);
3012 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay all keys recorded before AP start into the FW, then set the
 * default WEP key on the broadcast link if any WEP key was added.
 * The recorded key table is always freed before returning.
 */
3016 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3019 struct wl1271_ap_key *key;
3020 bool wep_key_added = false;
3022 for (i = 0; i < MAX_NUM_KEYS; i++) {
3024 if (wlvif->ap.recorded_keys[i] == NULL)
3027 key = wlvif->ap.recorded_keys[i];
/* Group keys recorded with an invalid hlid go to the broadcast link. */
3029 if (hlid == WL12XX_INVALID_LINK_ID)
3030 hlid = wlvif->ap.bcast_hlid;
3032 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3033 key->id, key->key_type,
3034 key->key_size, key->key,
3035 hlid, key->tx_seq_32,
3040 if (key->key_type == KEY_WEP)
3041 wep_key_added = true;
3044 if (wep_key_added) {
3045 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3046 wlvif->ap.bcast_hlid);
3052 wl1271_free_ap_keys(wl, wlvif);
/*
 * Program a key into the FW for either an AP or STA role.
 * AP path: record the key if the AP hasn't started yet, otherwise issue
 * the set-AP-key command directly. STA path: resolve the target address,
 * silently ignore unsupported removals, and set the default WEP key when
 * needed.
 * NOTE(review): elided listing — several return/error-check lines between
 * the numbered lines are not shown.
 */
3056 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3057 u16 action, u8 id, u8 key_type,
3058 u8 key_size, const u8 *key, u32 tx_seq_32,
3059 u16 tx_seq_16, struct ieee80211_sta *sta)
3062 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3065 struct wl1271_station *wl_sta;
/* Pairwise keys target the station's hlid; group keys the bcast link. */
3069 wl_sta = (struct wl1271_station *)sta->drv_priv;
3070 hlid = wl_sta->hlid;
3072 hlid = wlvif->ap.bcast_hlid;
3075 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3077 * We do not support removing keys after AP shutdown.
3078 * Pretend we do to make mac80211 happy.
3080 if (action != KEY_ADD_OR_REPLACE)
/* AP not started yet: queue the key for wl1271_ap_init_hwenc. */
3083 ret = wl1271_record_ap_key(wl, wlvif, id,
3085 key, hlid, tx_seq_32,
3088 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3089 id, key_type, key_size,
3090 key, hlid, tx_seq_32,
3098 static const u8 bcast_addr[ETH_ALEN] = {
3099 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3102 addr = sta ? sta->addr : bcast_addr;
3104 if (is_zero_ether_addr(addr)) {
3105 /* We dont support TX only encryption */
3109 /* The wl1271 does not allow to remove unicast keys - they
3110 will be cleared automatically on next CMD_JOIN. Ignore the
3111 request silently, as we dont want the mac80211 to emit
3112 an error message. */
3113 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3116 /* don't remove key if hlid was already deleted */
3117 if (action == KEY_REMOVE &&
3118 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3121 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3122 id, key_type, key_size,
3123 key, addr, tx_seq_32,
3128 /* the default WEP key needs to be configured at least once */
3129 if (key_type == KEY_WEP) {
3130 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * mac80211 set_key callback wrapper. For ciphers whose spare-block
 * accounting may change (GEM/TKIP) it stops and flushes the TX queues
 * first so the next packets match the FW's new accounting, then delegates
 * to the HW-specific set_key op.
 */
3141 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3142 struct ieee80211_vif *vif,
3143 struct ieee80211_sta *sta,
3144 struct ieee80211_key_conf *key_conf)
3146 struct wl1271 *wl = hw->priv;
3148 bool might_change_spare =
3149 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3150 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3152 if (might_change_spare) {
3154 * stop the queues and flush to ensure the next packets are
3155 * in sync with FW spare block accounting
3157 mutex_lock(&wl->mutex);
3158 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3159 mutex_unlock(&wl->mutex);
/* Flush outside the mutex: wl1271_tx_flush takes its own locking. */
3161 wl1271_tx_flush(wl);
3164 mutex_lock(&wl->mutex);
3166 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3168 goto out_wake_queues;
3171 ret = wl1271_ps_elp_wakeup(wl);
3173 goto out_wake_queues;
3175 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3177 wl1271_ps_elp_sleep(wl);
/* Always restart the queues we stopped above, even on failure. */
3180 if (might_change_spare)
3181 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3183 mutex_unlock(&wl->mutex);
/*
 * Common set_key implementation shared by the HW-specific backends.
 * Maps the mac80211 cipher to a FW key type, captures the TX security
 * sequence counters for TKIP/CCMP/GEM, and performs the add/replace or
 * remove via wl1271_set_key. On a unicast (or WEP) key change for a STA
 * role the ARP response template is rebuilt to match the new encryption.
 * NOTE(review): elided listing — KEY_WEP assignment for the WEP ciphers
 * and some break/return lines are not shown.
 */
3188 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3189 struct ieee80211_vif *vif,
3190 struct ieee80211_sta *sta,
3191 struct ieee80211_key_conf *key_conf)
3193 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3199 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3201 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3202 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3203 key_conf->cipher, key_conf->keyidx,
3204 key_conf->keylen, key_conf->flags);
3205 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3207 switch (key_conf->cipher) {
3208 case WLAN_CIPHER_SUITE_WEP40:
3209 case WLAN_CIPHER_SUITE_WEP104:
3212 key_conf->hw_key_idx = key_conf->keyidx;
3214 case WLAN_CIPHER_SUITE_TKIP:
3215 key_type = KEY_TKIP;
3217 key_conf->hw_key_idx = key_conf->keyidx;
/* Seed the FW replay counters from the driver-tracked TX sequence. */
3218 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3219 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3221 case WLAN_CIPHER_SUITE_CCMP:
3224 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3225 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3226 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3228 case WL1271_CIPHER_SUITE_GEM:
3230 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3231 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3234 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3241 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3242 key_conf->keyidx, key_type,
3243 key_conf->keylen, key_conf->key,
3244 tx_seq_32, tx_seq_16, sta);
3246 wl1271_error("Could not add or replace key");
3251 * reconfiguring arp response if the unicast (or common)
3252 * encryption key type was changed
3254 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3255 (sta || key_type == KEY_WEP) &&
3256 wlvif->encryption_type != key_type) {
3257 wlvif->encryption_type = key_type;
3258 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3260 wl1271_warning("build arp rsp failed: %d", ret);
3267 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3268 key_conf->keyidx, key_type,
3269 key_conf->keylen, key_conf->key,
3272 wl1271_error("Could not remove key");
3278 wl1271_error("Unsupported key cmd 0x%x", cmd);
3284 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 hw_scan callback: refuse to scan while any role is in ROC,
 * otherwise wake the chip and kick off wlcore_scan with the first SSID
 * from the request.
 */
3286 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3287 struct ieee80211_vif *vif,
3288 struct cfg80211_scan_request *req)
3290 struct wl1271 *wl = hw->priv;
3295 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3298 ssid = req->ssids[0].ssid;
3299 len = req->ssids[0].ssid_len;
3302 mutex_lock(&wl->mutex);
3304 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3306 * We cannot return -EBUSY here because cfg80211 will expect
3307 * a call to ieee80211_scan_completed if we do - in this case
3308 * there won't be any call.
3314 ret = wl1271_ps_elp_wakeup(wl);
3318 /* fail if there is any role in ROC */
3319 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3320 /* don't allow scanning right now */
3325 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3327 wl1271_ps_elp_sleep(wl);
3329 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan callback: stop an in-flight FW scan, reset the
 * driver scan state, report completion (aborted=true) to mac80211, and
 * cancel the scan-complete work after dropping the mutex.
 */
3334 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3335 struct ieee80211_vif *vif)
3337 struct wl1271 *wl = hw->priv;
3338 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3341 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3343 mutex_lock(&wl->mutex);
3345 if (unlikely(wl->state != WLCORE_STATE_ON))
3348 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3351 ret = wl1271_ps_elp_wakeup(wl);
/* Only tell the FW to stop if the scan hasn't already finished. */
3355 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3356 ret = wl->ops->scan_stop(wl, wlvif);
3362 * Rearm the tx watchdog just before idling scan. This
3363 * prevents just-finished scans from triggering the watchdog
3365 wl12xx_rearm_tx_watchdog_locked(wl);
3367 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3368 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3369 wl->scan_wlvif = NULL;
3370 wl->scan.req = NULL;
/* aborted=true: the scan did not run to completion. */
3371 ieee80211_scan_completed(wl->hw, true);
3374 wl1271_ps_elp_sleep(wl);
3376 mutex_unlock(&wl->mutex);
/* Must be outside the mutex: the work itself takes wl->mutex. */
3378 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start callback: wake the chip, start the scheduled
 * scan via the HW-specific op and mark sched-scan as active on success.
 */
3381 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3382 struct ieee80211_vif *vif,
3383 struct cfg80211_sched_scan_request *req,
3384 struct ieee80211_sched_scan_ies *ies)
3386 struct wl1271 *wl = hw->priv;
3387 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3390 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3392 mutex_lock(&wl->mutex);
3394 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3399 ret = wl1271_ps_elp_wakeup(wl);
3403 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3407 wl->sched_scanning = true;
3410 wl1271_ps_elp_sleep(wl);
3412 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop callback: wake the chip and stop the scheduled
 * scan via the HW-specific op.
 */
3416 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3417 struct ieee80211_vif *vif)
3419 struct wl1271 *wl = hw->priv;
3420 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3423 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3425 mutex_lock(&wl->mutex);
3427 if (unlikely(wl->state != WLCORE_STATE_ON))
3430 ret = wl1271_ps_elp_wakeup(wl);
3434 wl->ops->sched_scan_stop(wl, wlvif);
3436 wl1271_ps_elp_sleep(wl);
3438 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold callback: push the new fragmentation
 * threshold to the FW (global, not per-vif).
 */
3441 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3443 struct wl1271 *wl = hw->priv;
3446 mutex_lock(&wl->mutex);
3448 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3453 ret = wl1271_ps_elp_wakeup(wl);
3457 ret = wl1271_acx_frag_threshold(wl, value);
3459 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3461 wl1271_ps_elp_sleep(wl);
3464 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold callback: apply the new RTS threshold to
 * every active vif (per-vif acx, unlike the frag threshold above).
 */
3469 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3471 struct wl1271 *wl = hw->priv;
3472 struct wl12xx_vif *wlvif;
3475 mutex_lock(&wl->mutex);
3477 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3482 ret = wl1271_ps_elp_wakeup(wl);
3486 wl12xx_for_each_wlvif(wl, wlvif) {
3487 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3489 wl1271_warning("set rts threshold failed: %d", ret);
3491 wl1271_ps_elp_sleep(wl);
3494 mutex_unlock(&wl->mutex);
/*
 * Remove one information element (by EID) from a frame skb: locate the IE
 * after `ieoffset`, close the gap with memmove and shrink the skb.
 * NOTE(review): elided listing — the not-found early return and the
 * `len`/`next` computations are not shown between the numbered lines.
 */
3499 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3502 const u8 *next, *end = skb->data + skb->len;
3503 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3504 skb->len - ieoffset);
3509 memmove(ie, next, end - next);
3510 skb_trim(skb, skb->len - len);
/*
 * Remove one vendor-specific IE (by OUI/type) from a frame skb, same
 * mechanics as wl12xx_remove_ie above.
 * NOTE(review): elided listing — the not-found early return and the
 * `len`/`next` computations are not shown between the numbered lines.
 */
3513 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3514 unsigned int oui, u8 oui_type,
3518 const u8 *next, *end = skb->data + skb->len;
3519 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3520 skb->data + ieoffset,
3521 skb->len - ieoffset);
3526 memmove(ie, next, end - next);
3527 skb_trim(skb, skb->len - len);
/*
 * Upload the probe-response template built by mac80211
 * (ieee80211_proberesp_get) to the FW and mark it as explicitly set,
 * which stops wlcore_set_beacon_template from overwriting it with a
 * beacon-derived one.
 */
3530 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3531 struct ieee80211_vif *vif)
3533 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3534 struct sk_buff *skb;
3537 skb = ieee80211_proberesp_get(wl->hw, vif);
3541 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3542 CMD_TEMPL_AP_PROBE_RESPONSE,
3551 wl1271_debug(DEBUG_AP, "probe response updated");
3552 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy path for building the AP probe-response template: if the vif's
 * SSID is hidden (wlvif->ssid_len == 0), splice the real SSID from
 * bss_conf into the caller-supplied probe-response data before uploading
 * the template; otherwise upload the data as-is.
 */
3558 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3559 struct ieee80211_vif *vif,
3561 size_t probe_rsp_len,
3564 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3565 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3566 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3567 int ssid_ie_offset, ie_offset, templ_len;
3570 /* no need to change probe response if the SSID is set correctly */
3571 if (wlvif->ssid_len > 0)
3572 return wl1271_cmd_template_set(wl, wlvif->role_id,
3573 CMD_TEMPL_AP_PROBE_RESPONSE,
/* Guard the fixed-size template buffer against overflow. */
3578 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3579 wl1271_error("probe_rsp template too big");
3583 /* start searching from IE offset */
3584 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3586 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3587 probe_rsp_len - ie_offset);
3589 wl1271_error("No SSID in beacon!");
3593 ssid_ie_offset = ptr - probe_rsp_data;
/* Skip past the original (empty/hidden) SSID IE: 2-byte header + len. */
3594 ptr += (ptr[1] + 2);
3596 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3598 /* insert SSID from bss_conf */
3599 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3600 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3601 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3602 bss_conf->ssid, bss_conf->ssid_len);
3603 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* Append everything that followed the original SSID IE. */
3605 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3606 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3607 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3609 return wl1271_cmd_template_set(wl, wlvif->role_id,
3610 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to the FW: slot time, preamble
 * length and CTS protection. Failures are logged as warnings.
 */
3616 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3617 struct ieee80211_vif *vif,
3618 struct ieee80211_bss_conf *bss_conf,
3621 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3624 if (changed & BSS_CHANGED_ERP_SLOT) {
3625 if (bss_conf->use_short_slot)
3626 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3628 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3630 wl1271_warning("Set slot time failed %d", ret);
3635 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3636 if (bss_conf->use_short_preamble)
3637 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3639 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3642 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3643 if (bss_conf->use_cts_prot)
3644 ret = wl1271_acx_cts_protect(wl, wlvif,
3647 ret = wl1271_acx_cts_protect(wl, wlvif,
3648 CTSPROTECT_DISABLE);
3650 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211, upload it as the beacon
 * template, then (unless usermode already supplied an explicit probe
 * response) derive a probe-response template from the same frame:
 * strip the TIM and P2P IEs, rewrite the frame control to PROBE_RESP
 * and upload it. The beacon skb is freed on every path.
 */
3659 static int wlcore_set_beacon_template(struct wl1271 *wl,
3660 struct ieee80211_vif *vif,
3663 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3664 struct ieee80211_hdr *hdr;
3667 int ieoffset = offsetof(struct ieee80211_mgmt,
3669 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3677 wl1271_debug(DEBUG_MASTER, "beacon updated");
3679 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3681 dev_kfree_skb(beacon);
3684 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3685 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3687 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3692 dev_kfree_skb(beacon);
/* Remember whether the beacon advertises WMM (vendor IE present). */
3696 wlvif->wmm_enabled =
3697 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3698 WLAN_OUI_TYPE_MICROSOFT_WMM,
3699 beacon->data + ieoffset,
3700 beacon->len - ieoffset);
3703 * In case we already have a probe-resp beacon set explicitly
3704 * by usermode, don't use the beacon data.
3706 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3709 /* remove TIM ie from probe response */
3710 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset)
3713 * remove p2p ie from probe response.
3714 * the fw reponds to probe requests that don't include
3715 * the p2p ie. probe requests with p2p ie will be passed,
3716 * and will be responded by the supplicant (the spec
3717 * forbids including the p2p ie when responding to probe
3718 * requests that didn't include it).
3720 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3721 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* Rewrite the copied beacon as a probe response frame. */
3723 hdr = (struct ieee80211_hdr *) beacon->data;
3724 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3725 IEEE80211_STYPE_PROBE_RESP);
3727 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3732 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3733 CMD_TEMPL_PROBE_RESPONSE,
3738 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: record a new beacon interval,
 * refresh the AP probe-response template and re-upload the beacon
 * template when the beacon content changed.
 */
3746 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3747 struct ieee80211_vif *vif,
3748 struct ieee80211_bss_conf *bss_conf,
3751 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3752 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3755 if (changed & BSS_CHANGED_BEACON_INT) {
3756 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3757 bss_conf->beacon_int);
3759 wlvif->beacon_int = bss_conf->beacon_int;
3762 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3763 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3765 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3768 if (changed & BSS_CHANGED_BEACON) {
3769 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3776 wl1271_error("beacon info change failed: %d", ret);
3780 /* AP mode changes */
/*
 * Apply bss_conf changes to an AP role: rebuild rate policies and
 * templates on basic-rate changes, start/stop the AP role when beaconing
 * is enabled/disabled (replaying recorded keys on start), and forward
 * ERP and HT-operation changes to the FW.
 * NOTE(review): elided listing — error checks and `goto out` lines
 * between the numbered lines are not shown.
 */
3781 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3782 struct ieee80211_vif *vif,
3783 struct ieee80211_bss_conf *bss_conf,
3786 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3789 if (changed & BSS_CHANGED_BASIC_RATES) {
3790 u32 rates = bss_conf->basic_rates;
3792 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3794 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3795 wlvif->basic_rate_set);
3797 ret = wl1271_init_ap_rates(wl, wlvif);
3799 wl1271_error("AP rate policy change failed %d", ret);
/* Rate change invalidates templates; rebuild them all. */
3803 ret = wl1271_ap_init_templates(wl, vif);
3807 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3811 ret = wlcore_set_beacon_template(wl, vif, true);
3816 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3820 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3821 if (bss_conf->enable_beacon) {
3822 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3823 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* AP just started: replay keys recorded before start. */
3827 ret = wl1271_ap_init_hwenc(wl, wlvif);
3831 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3832 wl1271_debug(DEBUG_AP, "started AP");
3835 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3836 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3840 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3841 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3843 wl1271_debug(DEBUG_AP, "stopped AP");
3848 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3852 /* Handle HT information change */
3853 if ((changed & BSS_CHANGED_HT) &&
3854 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3855 ret = wl1271_acx_set_ht_information(wl, wlvif,
3856 bss_conf->ht_operation_mode);
3858 wl1271_warning("Set ht information failed %d", ret);
/*
 * Configure a STA role for a new BSSID: adopt the AP's beacon interval
 * and rate sets, stop any sched-scan (unsupported while connected),
 * push rate policies and null-data templates, set the SSID and mark the
 * vif in use.
 * NOTE(review): elided listing — return/error-check lines between the
 * numbered lines are not shown.
 */
3867 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3868 struct ieee80211_bss_conf *bss_conf,
3874 wl1271_debug(DEBUG_MAC80211,
3875 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3876 bss_conf->bssid, bss_conf->aid,
3877 bss_conf->beacon_int,
3878 bss_conf->basic_rates, sta_rate_set);
3880 wlvif->beacon_int = bss_conf->beacon_int;
3881 rates = bss_conf->basic_rates;
3882 wlvif->basic_rate_set =
3883 wl1271_tx_enabled_rates_get(wl, rates,
3886 wl1271_tx_min_rate_get(wl,
3887 wlvif->basic_rate_set);
3891 wl1271_tx_enabled_rates_get(wl,
3895 /* we only support sched_scan while not connected */
3896 if (wl->sched_scanning)
3897 wl->ops->sched_scan_stop(wl, wlvif);
3899 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3903 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3907 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
3911 wlcore_set_ssid(wl, wlvif);
3913 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid: revert to per-band default rates, re-apply rate
 * policies, stop the STA role if it was in use and clear the IN_USE flag.
 */
3918 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3922 /* revert back to minimum rates for the current band */
3923 wl1271_set_band_rate(wl, wlvif);
3924 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3926 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3930 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3931 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
3932 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
3937 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
3940 /* STA/IBSS mode changes */
/*
 * Apply bss_conf changes to a STA or IBSS role. Ordering matters:
 * beacon/IBSS state first, then CQM thresholds, capture the AP station's
 * rates/HT caps, set or clear the BSSID, join if needed, handle
 * association state, HT capabilities/information, and finally ARP
 * filtering (which must follow the join).
 * NOTE(review): elided listing — many error checks, `goto out` lines and
 * some assignments (e.g. do_join flag updates) are not shown between the
 * numbered lines.
 */
3941 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3942 struct ieee80211_vif *vif,
3943 struct ieee80211_bss_conf *bss_conf,
3946 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3947 bool do_join = false;
3948 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3949 bool ibss_joined = false;
3950 u32 sta_rate_set = 0;
3952 struct ieee80211_sta *sta;
3953 bool sta_exists = false;
3954 struct ieee80211_sta_ht_cap sta_ht_cap;
3957 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3963 if (changed & BSS_CHANGED_IBSS) {
3964 if (bss_conf->ibss_joined) {
3965 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: tear down association state and stop the role. */
3968 wlcore_unset_assoc(wl, wlvif);
3969 wl12xx_cmd_role_stop_sta(wl, wlvif);
3973 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3976 /* Need to update the SSID (for filtering etc) */
3977 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3980 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3981 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3982 bss_conf->enable_beacon ? "enabled" : "disabled");
/* Configure RSSI/SNR trigger for connection-quality monitoring. */
3987 if (changed & BSS_CHANGED_CQM) {
3988 bool enable = false;
3989 if (bss_conf->cqm_rssi_thold)
3991 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3992 bss_conf->cqm_rssi_thold,
3993 bss_conf->cqm_rssi_hyst);
3996 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* Snapshot the AP station's supported/HT rates under RCU. */
3999 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4000 BSS_CHANGED_ASSOC)) {
4002 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4004 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4006 /* save the supp_rates of the ap */
4007 sta_rate_set = sta->supp_rates[wlvif->band];
4008 if (sta->ht_cap.ht_supported)
4010 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4011 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4012 sta_ht_cap = sta->ht_cap;
4019 if (changed & BSS_CHANGED_BSSID) {
4020 if (!is_zero_ether_addr(bss_conf->bssid)) {
4021 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4026 /* Need to update the BSSID (for filtering etc) */
/* Zero BSSID means disassociation: clear our BSSID state. */
4029 ret = wlcore_clear_bssid(wl, wlvif);
4035 if (changed & BSS_CHANGED_IBSS) {
4036 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4037 bss_conf->ibss_joined);
4039 if (bss_conf->ibss_joined) {
4040 u32 rates = bss_conf->basic_rates;
4041 wlvif->basic_rate_set =
4042 wl1271_tx_enabled_rates_get(wl, rates,
4045 wl1271_tx_min_rate_get(wl,
4046 wlvif->basic_rate_set);
4048 /* by default, use 11b + OFDM rates */
4049 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4050 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4056 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4061 ret = wlcore_join(wl, wlvif);
4063 wl1271_warning("cmd join failed %d", ret);
4068 if (changed & BSS_CHANGED_ASSOC) {
4069 if (bss_conf->assoc) {
4070 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* If mac80211 already authorized us, tell the FW now. */
4075 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4076 wl12xx_set_authorized(wl, wlvif);
4078 wlcore_unset_assoc(wl, wlvif);
4082 /* Handle new association with HT. Do this after join. */
4084 (changed & BSS_CHANGED_HT)) {
4086 bss_conf->channel_type != NL80211_CHAN_NO_HT;
4088 ret = wl1271_acx_set_ht_capabilities(wl,
4093 wl1271_warning("Set ht cap failed %d", ret);
4099 ret = wl1271_acx_set_ht_information(wl, wlvif,
4100 bss_conf->ht_operation_mode);
4102 wl1271_warning("Set ht information failed %d",
4109 /* Handle arp filtering. Done after join. */
4110 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4111 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4112 __be32 addr = bss_conf->arp_addr_list[0];
4113 wlvif->sta.qos = bss_conf->qos;
4114 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4116 if (bss_conf->arp_addr_cnt == 1 &&
4117 bss_conf->arp_filter_enabled) {
4118 wlvif->ip_addr = addr;
4120 * The template should have been configured only upon
4121 * association. however, it seems that the correct ip
4122 * isn't being set (when sending), so we have to
4123 * reconfigure the template upon every ip change.
4125 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4127 wl1271_warning("build arp rsp failed: %d", ret);
4131 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4132 (ACX_ARP_FILTER_ARP_FILTERING |
4133 ACX_ARP_FILTER_AUTO_ARP),
/* No single filterable address: disable ARP filtering. */
4137 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback: cancel pending connection-loss work
 * on association changes, flush TX before an AP stops beaconing, then
 * dispatch to the AP or STA/IBSS handler under the mutex.
 */
4148 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4149 struct ieee80211_vif *vif,
4150 struct ieee80211_bss_conf *bss_conf,
4153 struct wl1271 *wl = hw->priv;
4154 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4155 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4158 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4159 wlvif->role_id, (int)changed);
4162 * make sure to cancel pending disconnections if our association
4165 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4166 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* Drain TX before the FW stops AP beaconing. */
4168 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4169 !bss_conf->enable_beacon)
4170 wl1271_tx_flush(wl);
4172 mutex_lock(&wl->mutex);
4174 if (unlikely(wl->state != WLCORE_STATE_ON))
4177 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4180 ret = wl1271_ps_elp_wakeup(wl);
4185 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4187 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4189 wl1271_ps_elp_sleep(wl);
4192 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback: nothing to program here — channel state
 * is applied per-vif in assign_vif_chanctx; just log the event.
 */
4195 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4196 struct ieee80211_chanctx_conf *ctx)
4198 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4199 ieee80211_frequency_to_channel(ctx->channel->center_freq),
/*
 * mac80211 remove_chanctx callback: log-only counterpart of add_chanctx.
 */
4204 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4205 struct ieee80211_chanctx_conf *ctx)
4207 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4208 ieee80211_frequency_to_channel(ctx->channel->center_freq),
/*
 * mac80211 change_chanctx callback: log-only; per-vif state is updated
 * via assign_vif_chanctx instead.
 */
4212 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4213 struct ieee80211_chanctx_conf *ctx,
4216 wl1271_debug(DEBUG_MAC80211,
4217 "mac80211 change chanctx %d (type %d) changed 0x%x",
4218 ieee80211_frequency_to_channel(ctx->channel->center_freq),
4219 ctx->channel_type, changed);
/*
 * mac80211 assign_vif_chanctx callback: record the channel context's
 * band/channel/type on the vif and reset the vif's rate sets to the new
 * band's defaults. No FW command is issued here.
 */
4222 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4223 struct ieee80211_vif *vif,
4224 struct ieee80211_chanctx_conf *ctx)
4226 struct wl1271 *wl = hw->priv;
4227 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4228 int channel = ieee80211_frequency_to_channel(
4229 ctx->channel->center_freq);
4231 wl1271_debug(DEBUG_MAC80211,
4232 "mac80211 assign chanctx (role %d) %d (type %d)",
4233 wlvif->role_id, channel, ctx->channel_type);
4235 mutex_lock(&wl->mutex);
4237 wlvif->band = ctx->channel->band;
4238 wlvif->channel = channel;
4239 wlvif->channel_type = ctx->channel_type;
4241 /* update default rates according to the band */
4242 wl1271_set_band_rate(wl, wlvif);
4244 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: flush pending TX so no frames
 * remain queued for the channel being released.
 */
4249 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4250 struct ieee80211_vif *vif,
4251 struct ieee80211_chanctx_conf *ctx)
4253 struct wl1271 *wl = hw->priv;
4254 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4256 wl1271_debug(DEBUG_MAC80211,
4257 "mac80211 unassign chanctx (role %d) %d (type %d)",
4259 ieee80211_frequency_to_channel(ctx->channel->center_freq),
4262 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program the EDCA AC parameters (cw_min/max,
 * AIFS, TXOP) and the TID configuration for one queue, choosing the PS
 * scheme (UPSD trigger vs legacy) per queue.
 */
4265 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4266 struct ieee80211_vif *vif, u16 queue,
4267 const struct ieee80211_tx_queue_params *params)
4269 struct wl1271 *wl = hw->priv;
4270 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4274 mutex_lock(&wl->mutex);
4276 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4279 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4281 ps_scheme = CONF_PS_SCHEME_LEGACY;
4283 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4286 ret = wl1271_ps_elp_wakeup(wl);
4291 * the txop is confed in units of 32us by the mac80211,
/* ... hence the << 5 conversion to the FW's microsecond units. */
4294 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4295 params->cw_min, params->cw_max,
4296 params->aifs, params->txop << 5);
4300 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4301 CONF_CHANNEL_TYPE_EDCF,
4302 wl1271_tx_get_queue(queue),
4303 ps_scheme, CONF_ACK_POLICY_LEGACY,
4307 wl1271_ps_elp_sleep(wl);
4310 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf handler: read the current TSF (mactime) from the FW
 * via an ACX query.  Defaults to ULLONG_MAX if the device is not ON or
 * the query fails (error paths are elided in this extract).
 */
4315 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4316 struct ieee80211_vif *vif)
4319 struct wl1271 *wl = hw->priv;
4320 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4321 u64 mactime = ULLONG_MAX;
4324 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4326 mutex_lock(&wl->mutex);
4328 if (unlikely(wl->state != WLCORE_STATE_ON))
4331 ret = wl1271_ps_elp_wakeup(wl);
4335 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4340 wl1271_ps_elp_sleep(wl);
4343 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey handler: report the currently configured channel.
 * NOTE(review): the idx bounds check and any additional filled survey
 * fields are elided in this extract.
 */
4347 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4348 struct survey_info *survey)
4350 struct ieee80211_conf *conf = &hw->conf;
4355 survey->channel = conf->channel;
/*
 * Allocate an HLID (host link ID) for a new AP-mode station: refuse
 * when the AP station limit is reached, grab a free link, then record
 * the HLID in the vif's station map and the peer MAC in wl->links.
 * Increments wl->active_sta_count on success.
 */
4360 static int wl1271_allocate_sta(struct wl1271 *wl,
4361 struct wl12xx_vif *wlvif,
4362 struct ieee80211_sta *sta)
4364 struct wl1271_station *wl_sta;
4368 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4369 wl1271_warning("could not allocate HLID - too much stations");
4373 wl_sta = (struct wl1271_station *)sta->drv_priv;
4374 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4376 wl1271_warning("could not allocate HLID - too many links");
4380 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4381 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4382 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear its entry in the vif's
 * station map, wipe the cached MAC and BA bitmap, drop it from the
 * host/FW power-save maps, free the link and decrement the station
 * count.  No-op if the HLID is not marked allocated.
 */
4386 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4388 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4391 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4392 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4393 wl->links[hlid].ba_bitmap = 0;
4394 __clear_bit(hlid, &wl->ap_ps_map);
4395 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4396 wl12xx_free_link(wl, wlvif, &hlid);
4397 wl->active_sta_count--;
4400 * rearm the tx watchdog when the last STA is freed - give the FW a
4401 * chance to return STA-buffered packets before complaining.
4403 if (wl->active_sta_count == 0)
4404 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID for the station and issue the
 * FW "add peer" command.  On command failure the HLID is released
 * again so no link leaks.
 */
4407 static int wl12xx_sta_add(struct wl1271 *wl,
4408 struct wl12xx_vif *wlvif,
4409 struct ieee80211_sta *sta)
4411 struct wl1271_station *wl_sta;
4415 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4417 ret = wl1271_allocate_sta(wl, wlvif, sta);
4421 wl_sta = (struct wl1271_station *)sta->drv_priv;
4422 hlid = wl_sta->hlid;
4424 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation if the FW rejected the peer */
4426 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: tell the FW to drop the peer, then free the
 * station's HLID.
 * NOTE(review): the assignment of 'id' (presumably id = wl_sta->hlid)
 * is on a line elided from this extract — confirm against full source.
 */
4431 static int wl12xx_sta_remove(struct wl1271 *wl,
4432 struct wl12xx_vif *wlvif,
4433 struct ieee80211_sta *sta)
4435 struct wl1271_station *wl_sta;
4438 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4440 wl_sta = (struct wl1271_station *)sta->drv_priv;
/* sanity: the station must currently own an allocated HLID */
4442 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4445 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4449 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Core station state machine, driven by mac80211 sta_state transitions.
 * Handles: adding/removing AP-mode peers, authorizing peers (AP and
 * STA roles), de-authorization back to ASSOC, clearing remain-on-
 * channel (ROC) entries once authorization completes or fails, and
 * starting a ROC for a new peer when none is active.
 * NOTE(review): several condition prefixes (e.g. the is_ap/is_sta
 * guards on each branch) sit on lines elided from this extract.
 */
4453 static int wl12xx_update_sta_state(struct wl1271 *wl,
4454 struct wl12xx_vif *wlvif,
4455 struct ieee80211_sta *sta,
4456 enum ieee80211_sta_state old_state,
4457 enum ieee80211_sta_state new_state)
4459 struct wl1271_station *wl_sta;
4461 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4462 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4465 wl_sta = (struct wl1271_station *)sta->drv_priv;
4466 hlid = wl_sta->hlid;
4468 /* Add station (AP mode) */
4470 old_state == IEEE80211_STA_NOTEXIST &&
4471 new_state == IEEE80211_STA_NONE) {
4472 ret = wl12xx_sta_add(wl, wlvif, sta);
4477 /* Remove station (AP mode) */
4479 old_state == IEEE80211_STA_NONE &&
4480 new_state == IEEE80211_STA_NOTEXIST) {
/* return value deliberately ignored: removal is best-effort */
4482 wl12xx_sta_remove(wl, wlvif, sta);
4485 /* Authorize station (AP mode) */
4487 new_state == IEEE80211_STA_AUTHORIZED) {
4488 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
/* enable HT capabilities for the now-authorized peer */
4492 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4498 /* Authorize station */
4500 new_state == IEEE80211_STA_AUTHORIZED) {
4501 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4502 ret = wl12xx_set_authorized(wl, wlvif);
/* Authorized -> Assoc: roll back the authorization flags */
4508 old_state == IEEE80211_STA_AUTHORIZED &&
4509 new_state == IEEE80211_STA_ASSOC) {
4510 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4511 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4514 /* clear ROCs on failure or authorization */
4516 (new_state == IEEE80211_STA_AUTHORIZED ||
4517 new_state == IEEE80211_STA_NOTEXIST) {
4518 if (test_bit(wlvif->role_id, wl->roc_map))
4519 wl12xx_croc(wl, wlvif->role_id);
/* new peer appearing: start a ROC if no role is currently ROCing */
4523 old_state == IEEE80211_STA_NOTEXIST &&
4524 new_state == IEEE80211_STA_NONE) {
4525 if (find_first_bit(wl->roc_map,
4526 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4527 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4528 wl12xx_roc(wl, wlvif, wlvif->role_id,
4529 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state handler: locking/power wrapper around
 * wl12xx_update_sta_state().  Wakes the chip, applies the transition,
 * and sleeps again, all under wl->mutex.  On a state *downgrade* the
 * error path is suppressed (presumably returns 0 so mac80211 can
 * always tear down — the elided return confirms this).
 */
4535 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4536 struct ieee80211_vif *vif,
4537 struct ieee80211_sta *sta,
4538 enum ieee80211_sta_state old_state,
4539 enum ieee80211_sta_state new_state)
4541 struct wl1271 *wl = hw->priv;
4542 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4545 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4546 sta->aid, old_state, new_state);
4548 mutex_lock(&wl->mutex);
4550 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4555 ret = wl1271_ps_elp_wakeup(wl);
4559 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4561 wl1271_ps_elp_sleep(wl);
4563 mutex_unlock(&wl->mutex);
4564 if (new_state < old_state)
/*
 * mac80211 ampdu_action handler.  RX BA sessions are managed by the
 * host: start/stop are forwarded to the FW via
 * wl12xx_acx_set_ba_receiver_session() and tracked in a per-link (AP)
 * or per-vif (STA) ba_bitmap plus a global session counter.  TX BA
 * sessions are handled autonomously by the FW, so TX actions are
 * accepted without any host-side work.
 */
4569 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4570 struct ieee80211_vif *vif,
4571 enum ieee80211_ampdu_mlme_action action,
4572 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4575 struct wl1271 *wl = hw->priv;
4576 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4578 u8 hlid, *ba_bitmap;
4580 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4583 /* sanity check - the fields in FW are only 8bits wide */
4584 if (WARN_ON(tid > 0xFF))
4587 mutex_lock(&wl->mutex);
4589 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* pick the HLID and BA bitmap for this role type */
4594 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4595 hlid = wlvif->sta.hlid;
4596 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4597 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4598 struct wl1271_station *wl_sta;
4600 wl_sta = (struct wl1271_station *)sta->drv_priv;
4601 hlid = wl_sta->hlid;
4602 ba_bitmap = &wl->links[hlid].ba_bitmap;
4608 ret = wl1271_ps_elp_wakeup(wl);
4612 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4616 case IEEE80211_AMPDU_RX_START:
4617 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4622 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4624 wl1271_error("exceeded max RX BA sessions");
/* reject a second BA session on a tid that already has one */
4628 if (*ba_bitmap & BIT(tid)) {
4630 wl1271_error("cannot enable RX BA session on active "
4635 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4638 *ba_bitmap |= BIT(tid);
4639 wl->ba_rx_session_count++;
4643 case IEEE80211_AMPDU_RX_STOP:
4644 if (!(*ba_bitmap & BIT(tid))) {
4646 * this happens on reconfig - so only output a debug
4647 * message for now, and don't fail the function.
4649 wl1271_debug(DEBUG_MAC80211,
4650 "no active RX BA session on tid: %d",
4656 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4659 *ba_bitmap &= ~BIT(tid);
4660 wl->ba_rx_session_count--;
4665 * The BA initiator (TX) session management is done by the FW
4666 * independently; deliberately fall through for all TX AMPDU actions.
4668 case IEEE80211_AMPDU_TX_START:
4669 case IEEE80211_AMPDU_TX_STOP:
4670 case IEEE80211_AMPDU_TX_OPERATIONAL:
4675 wl1271_error("Incorrect ampdu action id=%x\n", action);
4679 wl1271_ps_elp_sleep(wl);
4682 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask handler: translate the per-band legacy
 * rate masks into the driver's rate-set representation and cache them
 * on the vif.  For a not-yet-associated STA vif, additionally push the
 * new default rates and rate policies to the FW immediately.
 */
4687 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4688 struct ieee80211_vif *vif,
4689 const struct cfg80211_bitrate_mask *mask)
4691 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4692 struct wl1271 *wl = hw->priv;
4695 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4696 mask->control[NL80211_BAND_2GHZ].legacy,
4697 mask->control[NL80211_BAND_5GHZ].legacy);
4699 mutex_lock(&wl->mutex);
/* cache the converted mask per band for later use */
4701 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4702 wlvif->bitrate_masks[i] =
4703 wl1271_tx_enabled_rates_get(wl,
4704 mask->control[i].legacy,
4707 if (unlikely(wl->state != WLCORE_STATE_ON))
/* apply immediately only for an unassociated STA vif */
4710 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4711 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4713 ret = wl1271_ps_elp_wakeup(wl);
4717 wl1271_set_band_rate(wl, wlvif);
4719 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4720 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4722 wl1271_ps_elp_sleep(wl);
4725 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch handler: flush TX, then ask the FW to
 * perform the CSA on every STA vif.  If the device is OFF, report the
 * switch as failed to mac80211 for each STA vif.  A delayed work is
 * armed to declare failure if the switch has not completed ~5 seconds
 * after the expected switch time (beacon_int * count, elided factor).
 */
4730 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4731 struct ieee80211_channel_switch *ch_switch)
4733 struct wl1271 *wl = hw->priv;
4734 struct wl12xx_vif *wlvif;
4737 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4739 wl1271_tx_flush(wl);
4741 mutex_lock(&wl->mutex);
4743 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4744 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4745 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* device is down: tell mac80211 the switch failed */
4746 ieee80211_chswitch_done(vif, false);
4749 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4753 ret = wl1271_ps_elp_wakeup(wl);
4757 /* TODO: change mac80211 to pass vif as param */
4758 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4759 unsigned long delay_usec;
4761 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4765 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4767 /* indicate failure 5 seconds after channel switch time */
4768 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4770 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4771 usecs_to_jiffies(delay_usec) +
4772 msecs_to_jiffies(5000));
4776 wl1271_ps_elp_sleep(wl);
4779 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush handler: drain all pending TX frames.  The 'drop'
 * hint is ignored — the driver always performs a full flush.
 */
4782 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4784 struct wl1271 *wl = hw->priv;
4786 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel handler: start the device role on the
 * requested channel and arm roc_complete_work to fire after the
 * requested duration.  Only one ROC may be active at a time — the
 * WARN_ON rejects the call (EBUSY, elided) when a ROC vif or any ROC
 * role already exists.
 */
4789 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4790 struct ieee80211_vif *vif,
4791 struct ieee80211_channel *chan,
4792 enum nl80211_channel_type channel_type,
4795 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4796 struct wl1271 *wl = hw->priv;
4797 int channel, ret = 0;
4799 channel = ieee80211_frequency_to_channel(chan->center_freq);
4801 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4802 channel, wlvif->role_id);
4804 mutex_lock(&wl->mutex);
4806 if (unlikely(wl->state != WLCORE_STATE_ON))
4809 /* return EBUSY if we can't ROC right now */
4810 if (WARN_ON(wl->roc_vif ||
4811 find_first_bit(wl->roc_map,
4812 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4817 ret = wl1271_ps_elp_wakeup(wl);
4821 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* schedule ROC completion after the caller-requested duration */
4826 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4827 msecs_to_jiffies(duration));
4829 wl1271_ps_elp_sleep(wl);
4831 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel: stop the device role of the ROC vif.
 * Caller must hold wl->mutex.  Returns early when no ROC is pending
 * or the vif is no longer initialized (clearing of wl->roc_vif is on
 * elided lines).
 */
4835 static int __wlcore_roc_completed(struct wl1271 *wl)
4837 struct wl12xx_vif *wlvif;
4840 /* already completed */
4841 if (unlikely(!wl->roc_vif))
4844 wlvif = wl12xx_vif_to_data(wl->roc_vif);
4846 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4849 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the chip, completes the ROC and sleeps again.
 */
4858 static int wlcore_roc_completed(struct wl1271 *wl)
4862 wl1271_debug(DEBUG_MAC80211, "roc complete");
4864 mutex_lock(&wl->mutex);
4866 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4871 ret = wl1271_ps_elp_wakeup(wl);
4875 ret = __wlcore_roc_completed(wl);
4877 wl1271_ps_elp_sleep(wl);
4879 mutex_unlock(&wl->mutex);
/*
 * Delayed-work callback armed by wlcore_op_remain_on_channel(): ends
 * the ROC and notifies mac80211 that the remain-on-channel expired.
 */
4884 static void wlcore_roc_complete_work(struct work_struct *work)
4886 struct delayed_work *dwork;
4890 dwork = container_of(work, struct delayed_work, work);
4891 wl = container_of(dwork, struct wl1271, roc_complete_work);
4893 ret = wlcore_roc_completed(wl);
4895 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel handler: flush TX, cancel the
 * pending roc_complete_work (synchronously — flush_work would risk a
 * deadlock since we may run on the same workqueue) and complete the
 * ROC immediately.
 */
4898 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
4900 struct wl1271 *wl = hw->priv;
4902 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
4905 wl1271_tx_flush(wl);
4908 * we can't just flush_work here, because it might deadlock
4909 * (as we might get called from the same workqueue)
4911 cancel_delayed_work_sync(&wl->roc_complete_work);
4912 wlcore_roc_completed(wl);
/*
 * mac80211 tx_frames_pending handler: true when frames wait either in
 * the driver TX queues or inside the FW.  Returns false (via elided
 * path) when the device is not ON.
 */
4917 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4919 struct wl1271 *wl = hw->priv;
4922 mutex_lock(&wl->mutex);
4924 if (unlikely(wl->state != WLCORE_STATE_ON))
4927 /* packets are considered pending if in the TX queue or the FW */
4928 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4930 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz band description handed to mac80211: legacy rate table
 * (HW rate-bit values; the .bitrate/.center_freq fields per entry are
 * elided in this extract), the 14 2.4 GHz channels, and the band
 * struct tying them together.
 */
4935 /* can't be const, mac80211 writes to this */
4936 static struct ieee80211_rate wl1271_rates[] = {
4938 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4939 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4941 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4942 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4943 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4945 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4946 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4947 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4949 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4950 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4951 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4953 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4954 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4956 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4957 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4959 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4960 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4962 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4963 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4965 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4966 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4968 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4969 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4971 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4972 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4974 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4975 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4978 /* can't be const, mac80211 writes to this */
4979 static struct ieee80211_channel wl1271_channels[] = {
4980 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4981 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4982 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4983 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4984 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4985 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4986 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4987 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4988 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4989 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4990 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4991 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4992 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4993 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4996 /* can't be const, mac80211 writes to this */
4997 static struct ieee80211_supported_band wl1271_band_2ghz = {
4998 .channels = wl1271_channels,
4999 .n_channels = ARRAY_SIZE(wl1271_channels),
5000 .bitrates = wl1271_rates,
5001 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz band description for WL1273: OFDM-only rate table (no CCK,
 * hence no SHORT_PREAMBLE flags), the supported 5 GHz channels, and
 * the band struct tying them together.
 */
5004 /* 5 GHz data rates for WL1273 */
5005 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5007 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5008 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5010 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5011 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5013 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5014 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5016 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5017 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5019 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5020 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5022 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5023 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5025 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5026 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5028 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5029 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5032 /* 5 GHz band channels for WL1273 */
5033 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5034 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
5035 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
5036 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
5037 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
5038 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
5039 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
5040 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
5041 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
5042 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
5043 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
5044 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
5045 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
5046 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
5047 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
5048 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
5049 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
5050 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
5051 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
5052 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
5053 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
5054 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
5055 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
5056 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
5057 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
5058 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
5059 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
5060 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
5061 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
5062 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
5063 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
5064 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
5065 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
5066 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
5067 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
/* can't be const, mac80211 writes to this */
5070 static struct ieee80211_supported_band wl1271_band_5ghz = {
5071 .channels = wl1271_channels_5ghz,
5072 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5073 .bitrates = wl1271_rates_5ghz,
5074 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table: wires all wlcore/wl12xx handlers into the
 * mac80211 stack, including chanctx, ROC and testmode support.
 */
5077 static const struct ieee80211_ops wl1271_ops = {
5078 .start = wl1271_op_start,
5079 .stop = wlcore_op_stop,
5080 .add_interface = wl1271_op_add_interface,
5081 .remove_interface = wl1271_op_remove_interface,
5082 .change_interface = wl12xx_op_change_interface,
5084 .suspend = wl1271_op_suspend,
5085 .resume = wl1271_op_resume,
5087 .config = wl1271_op_config,
5088 .prepare_multicast = wl1271_op_prepare_multicast,
5089 .configure_filter = wl1271_op_configure_filter,
5091 .set_key = wlcore_op_set_key,
5092 .hw_scan = wl1271_op_hw_scan,
5093 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5094 .sched_scan_start = wl1271_op_sched_scan_start,
5095 .sched_scan_stop = wl1271_op_sched_scan_stop,
5096 .bss_info_changed = wl1271_op_bss_info_changed,
5097 .set_frag_threshold = wl1271_op_set_frag_threshold,
5098 .set_rts_threshold = wl1271_op_set_rts_threshold,
5099 .conf_tx = wl1271_op_conf_tx,
5100 .get_tsf = wl1271_op_get_tsf,
5101 .get_survey = wl1271_op_get_survey,
5102 .sta_state = wl12xx_op_sta_state,
5103 .ampdu_action = wl1271_op_ampdu_action,
5104 .tx_frames_pending = wl1271_tx_frames_pending,
5105 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5106 .channel_switch = wl12xx_op_channel_switch,
5107 .flush = wlcore_op_flush,
5108 .remain_on_channel = wlcore_op_remain_on_channel,
5109 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5110 .add_chanctx = wlcore_op_add_chanctx,
5111 .remove_chanctx = wlcore_op_remove_chanctx,
5112 .change_chanctx = wlcore_op_change_chanctx,
5113 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5114 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5115 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a HW RX rate code to the mac80211 rate index for the given band
 * via the per-chip band_rate_to_idx table.  Logs an error on an
 * out-of-range or unsupported rate (the fallback return value is on
 * elided lines).
 */
5119 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5125 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5126 wl1271_error("Illegal RX rate from HW: %d", rate);
5130 idx = wl->band_rate_to_idx[band][rate];
5131 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5132 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs attribute "bt_coex_state": show prints the current BT coex
 * (soft-gemini) enable state; store parses a 0/1 value and, when the
 * device is ON, wakes the chip to apply the new setting via
 * wl1271_acx_sg_enable().  Both are serialized on wl->mutex.
 */
5139 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5140 struct device_attribute *attr,
5143 struct wl1271 *wl = dev_get_drvdata(dev);
5148 mutex_lock(&wl->mutex);
5149 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5151 mutex_unlock(&wl->mutex);
5157 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5158 struct device_attribute *attr,
5159 const char *buf, size_t count)
5161 struct wl1271 *wl = dev_get_drvdata(dev);
5165 ret = kstrtoul(buf, 10, &res);
5167 wl1271_warning("incorrect value written to bt_coex_mode");
5171 mutex_lock(&wl->mutex);
/* no-op if the state did not change */
5175 if (res == wl->sg_enabled)
5178 wl->sg_enabled = res;
/* apply to FW only when the device is actually running */
5180 if (unlikely(wl->state != WLCORE_STATE_ON))
5183 ret = wl1271_ps_elp_wakeup(wl);
5187 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5188 wl1271_ps_elp_sleep(wl);
5191 mutex_unlock(&wl->mutex);
5195 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5196 wl1271_sysfs_show_bt_coex_state,
5197 wl1271_sysfs_store_bt_coex_state);
/*
 * Read-only sysfs attribute "hw_pg_ver": prints the hardware PG
 * version, or "n/a" when it has not been read yet (negative).
 */
5199 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5200 struct device_attribute *attr,
5203 struct wl1271 *wl = dev_get_drvdata(dev);
5208 mutex_lock(&wl->mutex);
5209 if (wl->hw_pg_ver >= 0)
5210 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5212 len = snprintf(buf, len, "n/a\n");
5213 mutex_unlock(&wl->mutex);
5218 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5219 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary attribute "fwlog": blocking reader for the FW log
 * buffer.  Waits (interruptibly, one exclusive waiter at a time) until
 * log data arrives, then consumes up to 'count' bytes from the front
 * of wl->fwlog and compacts the remainder.  A negative fwlog_size
 * means the log was invalidated (e.g. on teardown — see
 * wlcore_free_hw); the elided return there presumably reports EOF/error.
 * Seeking is not supported; 'pos' is disregarded by design.
 */
5221 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5222 struct bin_attribute *bin_attr,
5223 char *buffer, loff_t pos, size_t count)
5225 struct device *dev = container_of(kobj, struct device, kobj);
5226 struct wl1271 *wl = dev_get_drvdata(dev);
5230 ret = mutex_lock_interruptible(&wl->mutex);
5232 return -ERESTARTSYS;
5234 /* Let only one thread read the log at a time, blocking others */
5235 while (wl->fwlog_size == 0) {
5238 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5240 TASK_INTERRUPTIBLE);
/* re-check after queueing: data may have arrived meanwhile */
5242 if (wl->fwlog_size != 0) {
5243 finish_wait(&wl->fwlog_waitq, &wait);
/* drop the mutex while sleeping so the writer can make progress */
5247 mutex_unlock(&wl->mutex);
5250 finish_wait(&wl->fwlog_waitq, &wait);
5252 if (signal_pending(current))
5253 return -ERESTARTSYS;
5255 ret = mutex_lock_interruptible(&wl->mutex);
5257 return -ERESTARTSYS;
5260 /* Check if the fwlog is still valid */
5261 if (wl->fwlog_size < 0) {
5262 mutex_unlock(&wl->mutex);
5266 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5267 len = min(count, (size_t)wl->fwlog_size);
5268 wl->fwlog_size -= len;
5269 memcpy(buffer, wl->fwlog, len);
5271 /* Make room for new messages */
5272 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5274 mutex_unlock(&wl->mutex);
5279 static struct bin_attribute fwlog_attr = {
5280 .attr = {.name = "fwlog", .mode = S_IRUSR},
5281 .read = wl1271_sysfs_read_fwlog,
/*
 * Build the wiphy address list from a base OUI+NIC pair: consecutive
 * NIC values fill wl->addresses (incrementing, per the elided loop
 * tail), warning if the 24-bit NIC part would wrap.  If the chip
 * supplies fewer than WLCORE_NUM_MAC_ADDRESSES, synthesize the last
 * one from the first with the locally-administered (LAA) bit set.
 */
5284 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5288 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5291 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5292 wl1271_warning("NIC part of the MAC address wraps around!");
5294 for (i = 0; i < wl->num_mac_addr; i++) {
5295 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5296 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5297 wl->addresses[i].addr[2] = (u8) oui;
5298 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5299 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5300 wl->addresses[i].addr[5] = (u8) nic;
5304 /* we may be one address short at the most */
5305 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5308 * turn on the LAA bit in the first address and use it as
5311 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5312 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5313 memcpy(&wl->addresses[idx], &wl->addresses[0],
5314 sizeof(wl->addresses[0]));
/* locally administered bit (bit 1 of the first octet's pair here) */
5316 wl->addresses[idx].addr[2] |= BIT(1);
5319 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5320 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on briefly to probe identity: read the chip ID
 * register, reset the cached fuse MAC parts, query the PG version and
 * (if the chip ops provide it) the fused MAC address; power off again
 * before returning.
 */
5323 static int wl12xx_get_hw_info(struct wl1271 *wl)
5327 ret = wl12xx_set_power_on(wl);
5331 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5335 wl->fuse_oui_addr = 0;
5336 wl->fuse_nic_addr = 0;
5338 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5342 if (wl->ops->get_mac)
5343 ret = wl->ops->get_mac(wl);
5346 wl1271_power_off(wl);
/*
 * Register the device with mac80211 (idempotent — early-outs if
 * already registered).  The base MAC address is taken from fixed NVS
 * byte offsets when an NVS is present; if the NVS address is all
 * zeroes, it falls back to the fused address (fuse holds the BD_ADDR,
 * so the WLAN address starts one past it).  Also initializes debugfs.
 */
5350 static int wl1271_register_hw(struct wl1271 *wl)
5353 u32 oui_addr = 0, nic_addr = 0;
5355 if (wl->mac80211_registered)
5358 if (wl->nvs_len >= 12) {
5359 /* NOTE: The wl->nvs->nvs element must be first, in
5360 * order to simplify the casting, we assume it is at
5361 * the beginning of the wl->nvs structure.
5363 u8 *nvs_ptr = (u8 *)wl->nvs;
5366 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5368 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5371 /* if the MAC address is zeroed in the NVS derive from fuse */
5372 if (oui_addr == 0 && nic_addr == 0) {
5373 oui_addr = wl->fuse_oui_addr;
5374 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5375 nic_addr = wl->fuse_nic_addr + 1;
5378 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5380 ret = ieee80211_register_hw(wl->hw);
5382 wl1271_error("unable to register mac80211 hw: %d", ret);
5386 wl->mac80211_registered = true;
5388 wl1271_debugfs_init(wl);
5390 wl1271_notice("loaded");
/*
 * Unregister from mac80211: stop PLT mode (the guarding condition is
 * elided in this extract), then drop the mac80211 registration.
 */
5396 static void wl1271_unregister_hw(struct wl1271 *wl)
5399 wl1271_plt_stop(wl);
5401 ieee80211_unregister_hw(wl->hw);
5402 wl->mac80211_registered = false;
/*
 * Allowed interface combinations advertised to cfg80211: one STA plus
 * one AP/P2P-GO/P2P-client style interface (the per-limit max counts
 * are on elided lines), all on a single channel, up to 3 interfaces.
 */
5406 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5409 .types = BIT(NL80211_IFTYPE_STATION),
5413 .types = BIT(NL80211_IFTYPE_AP) |
5414 BIT(NL80211_IFTYPE_P2P_GO) |
5415 BIT(NL80211_IFTYPE_P2P_CLIENT),
5419 static const struct ieee80211_iface_combination
5420 wlcore_iface_combinations[] = {
5422 .num_different_channels = 1,
5423 .max_interfaces = 3,
5424 .limits = wlcore_iface_limits,
5425 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * One-time mac80211/wiphy capability setup: headroom, HW feature
 * flags, cipher suites, supported interface modes, scan limits, band
 * tables (device-local copies so HT caps can differ per device),
 * probe-response offload, interface combinations, and per-station/vif
 * private data sizes.
 */
5429 static int wl1271_init_ieee80211(struct wl1271 *wl)
5431 static const u32 cipher_suites[] = {
5432 WLAN_CIPHER_SUITE_WEP40,
5433 WLAN_CIPHER_SUITE_WEP104,
5434 WLAN_CIPHER_SUITE_TKIP,
5435 WLAN_CIPHER_SUITE_CCMP,
/* TI proprietary GEM cipher */
5436 WL1271_CIPHER_SUITE_GEM,
5439 /* The tx descriptor buffer */
5440 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra room for the TKIP header workaround */
5442 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5443 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5446 /* FIXME: find a proper value */
5447 wl->hw->channel_change_time = 10000;
5448 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5450 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5451 IEEE80211_HW_SUPPORTS_PS |
5452 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5453 IEEE80211_HW_SUPPORTS_UAPSD |
5454 IEEE80211_HW_HAS_RATE_CONTROL |
5455 IEEE80211_HW_CONNECTION_MONITOR |
5456 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5457 IEEE80211_HW_SPECTRUM_MGMT |
5458 IEEE80211_HW_AP_LINK_PS |
5459 IEEE80211_HW_AMPDU_AGGREGATION |
5460 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5461 IEEE80211_HW_SCAN_WHILE_IDLE;
5463 wl->hw->wiphy->cipher_suites = cipher_suites;
5464 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5466 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5467 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5468 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5469 wl->hw->wiphy->max_scan_ssids = 1;
5470 wl->hw->wiphy->max_sched_scan_ssids = 16;
5471 wl->hw->wiphy->max_match_sets = 16;
5473 * Maximum length of elements in scanning probe request templates
5474 * should be the maximum length possible for a template, without
5475 * the IEEE80211 header of the template
5477 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5478 sizeof(struct ieee80211_header);
5480 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5481 sizeof(struct ieee80211_header);
5483 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5485 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5486 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5488 /* make sure all our channels fit in the scanned_ch bitmask */
5489 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5490 ARRAY_SIZE(wl1271_channels_5ghz) >
5491 WL1271_MAX_CHANNELS);
5493 * We keep local copies of the band structs because we need to
5494 * modify them on a per-device basis.
5496 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5497 sizeof(wl1271_band_2ghz));
5498 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5499 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5500 sizeof(*wl->ht_cap));
5501 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5502 sizeof(wl1271_band_5ghz));
5503 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5504 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5505 sizeof(*wl->ht_cap));
5507 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5508 &wl->bands[IEEE80211_BAND_2GHZ];
5509 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5510 &wl->bands[IEEE80211_BAND_5GHZ];
5513 wl->hw->max_rates = 1;
5515 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5517 /* the FW answers probe-requests in AP-mode */
5518 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5519 wl->hw->wiphy->probe_resp_offload =
5520 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5521 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5522 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5524 /* allowed interface combinations */
5525 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5526 wl->hw->wiphy->n_iface_combinations =
5527 ARRAY_SIZE(wlcore_iface_combinations);
5529 SET_IEEE80211_DEV(wl->hw, wl->dev);
5531 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5532 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5534 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5539 #define WL1271_DEFAULT_CHANNEL 0
/*
 * Allocate and initialize the driver instance: ieee80211_hw + chip
 * private area, per-link TX queues, deferred queues, work items, a
 * freezable workqueue, default state fields, the DMA-able aggregation
 * buffer, the dummy packet, one page of FW log storage and the mailbox
 * buffer.  On any failure, unwinds in reverse order via the chained
 * error labels (label names are elided in this extract) and returns
 * ERR_PTR(ret).  Ownership of everything allocated here is released by
 * wlcore_free_hw().
 */
5541 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5544 struct ieee80211_hw *hw;
5549 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5551 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5553 wl1271_error("could not alloc ieee80211_hw");
5559 memset(wl, 0, sizeof(*wl));
5561 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5563 wl1271_error("could not alloc wl priv");
5565 goto err_priv_alloc;
5568 INIT_LIST_HEAD(&wl->wlvif_list);
/* one skb queue per (link, TX AC) pair */
5572 for (i = 0; i < NUM_TX_QUEUES; i++)
5573 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5574 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5576 skb_queue_head_init(&wl->deferred_rx_queue);
5577 skb_queue_head_init(&wl->deferred_tx_queue);
5579 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5580 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5581 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5582 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5583 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5584 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5585 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is quiesced across suspend */
5587 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5588 if (!wl->freezable_wq) {
5593 wl->channel = WL1271_DEFAULT_CHANNEL;
5595 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5596 wl->band = IEEE80211_BAND_2GHZ;
5597 wl->channel_type = NL80211_CHAN_NO_HT;
5599 wl->sg_enabled = true;
5600 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5603 wl->ap_fw_ps_map = 0;
5605 wl->platform_quirks = 0;
5606 wl->sched_scanning = false;
5607 wl->system_hlid = WL12XX_SYSTEM_HLID;
5608 wl->active_sta_count = 0;
5610 init_waitqueue_head(&wl->fwlog_waitq);
5612 /* The system link is always allocated */
5613 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5615 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5616 for (i = 0; i < wl->num_tx_desc; i++)
5617 wl->tx_frames[i] = NULL;
5619 spin_lock_init(&wl->wl_lock);
5621 wl->state = WLCORE_STATE_OFF;
5622 wl->fw_type = WL12XX_FW_TYPE_NONE;
5623 mutex_init(&wl->mutex);
5624 mutex_init(&wl->flush_mutex);
5625 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be page-aligned contiguous memory */
5627 order = get_order(aggr_buf_size);
5628 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5629 if (!wl->aggr_buf) {
5633 wl->aggr_buf_size = aggr_buf_size;
5635 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5636 if (!wl->dummy_packet) {
5641 /* Allocate one page for the FW log */
5642 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5645 goto err_dummy_packet;
5648 wl->mbox_size = mbox_size;
/* GFP_DMA: the mailbox is read via DMA-capable bus transfers */
5649 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
/* --- error unwind: reverse order of allocation --- */
5658 free_page((unsigned long)wl->fwlog);
5661 dev_kfree_skb(wl->dummy_packet);
5664 free_pages((unsigned long)wl->aggr_buf, order);
5667 destroy_workqueue(wl->freezable_wq);
5670 wl1271_debugfs_exit(wl);
5674 ieee80211_free_hw(hw);
5678 return ERR_PTR(ret);
5680 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * wlcore_free_hw - tear down everything wlcore_alloc_hw() (and later init)
 * set up: wake fwlog readers, remove sysfs entries, free the buffers and
 * the ieee80211_hw itself.
 *
 * NOTE(review): elided listing — braces and some intermediate lines are
 * missing between the numbered lines.
 */
5682 int wlcore_free_hw(struct wl1271 *wl)
5684 /* Unblock any fwlog readers */
5685 mutex_lock(&wl->mutex);
/* fwlog_size == -1 is the teardown sentinel observed by sleeping readers. */
5686 wl->fwlog_size = -1;
5687 wake_up_interruptible_all(&wl->fwlog_waitq);
5688 mutex_unlock(&wl->mutex);
/* Remove the sysfs files created in wlcore_nvs_cb(). */
5690 device_remove_bin_file(wl->dev, &fwlog_attr);
5692 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5694 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
/* Free buffers in reverse order of allocation. */
5696 free_page((unsigned long)wl->fwlog);
5697 dev_kfree_skb(wl->dummy_packet);
5698 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5700 wl1271_debugfs_exit(wl);
5704 wl->fw_type = WL12XX_FW_TYPE_NONE;
5708 kfree(wl->fw_status_1);
5709 kfree(wl->tx_res_if);
5710 destroy_workqueue(wl->freezable_wq);
/* Releases the struct wl1271 itself (embedded in the hw priv area). */
5713 ieee80211_free_hw(wl->hw);
5717 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * wl12xx_hardirq - top-half interrupt handler.
 *
 * Completes any pending ELP wakeup completion under wl_lock, and normally
 * returns IRQ_WAKE_THREAD so the threaded handler (wlcore_irq, see the
 * request_threaded_irq() call in wlcore_nvs_cb) does the real work.  While
 * suspended it instead marks the work as pending, disables the IRQ and
 * reports a PM wakeup event.
 *
 * NOTE(review): elided listing — the return statement of the suspended
 * branch is not visible here.
 */
5719 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5721 struct wl1271 *wl = cookie;
5722 unsigned long flags;
5724 wl1271_debug(DEBUG_IRQ, "IRQ");
5726 /* complete the ELP completion */
5727 spin_lock_irqsave(&wl->wl_lock, flags);
5728 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5729 if (wl->elp_compl) {
5730 complete(wl->elp_compl);
5731 wl->elp_compl = NULL;
5734 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5735 /* don't enqueue a work right now. mark it as pending */
5736 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5737 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
/* _nosync: we are in hard-irq context on this very line. */
5738 disable_irq_nosync(wl->irq);
5739 pm_wakeup_event(wl->dev, 0);
5740 spin_unlock_irqrestore(&wl->wl_lock, flags);
5743 spin_unlock_irqrestore(&wl->wl_lock, flags);
5745 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - async completion callback for the NVS firmware request
 * issued by wlcore_probe().
 *
 * Copies the NVS image (if one was found), runs the chip-family setup,
 * wires up the interrupt, configures wakeup/WoWLAN, identifies the chip,
 * registers with mac80211 and creates the driver sysfs files.  Always
 * releases the firmware and completes nvs_loading_complete at the end so
 * wlcore_remove() can make progress.
 *
 * NOTE(review): elided listing — most error checks ("if (ret < 0) goto ...")
 * between the visible calls are missing from this view.
 */
5748 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5750 struct wl1271 *wl = context;
5751 struct platform_device *pdev = wl->pdev;
5752 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5753 unsigned long irqflags;
/* NVS is optional: a missing file is only a debug message, not fatal. */
5757 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5759 wl1271_error("Could not allocate nvs data");
5762 wl->nvs_len = fw->size;
5764 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
/* Chip-family specific setup (wl12xx vs wl18xx lower driver). */
5770 ret = wl->ops->setup(wl);
5774 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5776 /* adjust some runtime configuration parameters */
5777 wlcore_adjust_conf(wl);
/* Pull IRQ and board-specific hooks from the platform data. */
5779 wl->irq = platform_get_irq(pdev, 0);
5780 wl->platform_quirks = pdata->platform_quirks;
5781 wl->set_power = pdata->set_power;
5782 wl->if_ops = pdata->ops;
/* Edge-triggered boards cannot use a level-triggered oneshot IRQ. */
5784 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5785 irqflags = IRQF_TRIGGER_RISING;
5787 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
/* Top half wl12xx_hardirq, bottom half wlcore_irq (threaded). */
5789 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5793 wl1271_error("request_irq() failed: %d", ret);
/* Make the WLAN IRQ a system wakeup source; enables WoWLAN support. */
5798 ret = enable_irq_wake(wl->irq);
5800 wl->irq_wake_enabled = true;
5801 device_init_wakeup(wl->dev, 1);
5802 if (pdata->pwr_in_suspend) {
5803 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5804 wl->hw->wiphy->wowlan.n_patterns =
5805 WL1271_MAX_RX_FILTERS;
5806 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5807 wl->hw->wiphy->wowlan.pattern_max_len =
5808 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* Keep the IRQ off until the interface is brought up. */
5812 disable_irq(wl->irq);
5814 ret = wl12xx_get_hw_info(wl);
5816 wl1271_error("couldn't get hw info");
5820 ret = wl->ops->identify_chip(wl);
5824 ret = wl1271_init_ieee80211(wl);
5828 ret = wl1271_register_hw(wl);
5832 /* Create sysfs file to control bt coex state */
5833 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5835 wl1271_error("failed to create sysfs file bt_coex_state");
5839 /* Create sysfs file to get HW PG version */
5840 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5842 wl1271_error("failed to create sysfs file hw_pg_ver");
5843 goto out_bt_coex_state;
5846 /* Create sysfs file for the FW log */
5847 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5849 wl1271_error("failed to create sysfs file fwlog");
/* Checked by wlcore_remove() to decide how much to tear down. */
5853 wl->initialized = true;
/* Error unwind: undo sysfs files / registration / IRQ in reverse order. */
5857 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5860 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5863 wl1271_unregister_hw(wl);
5866 free_irq(wl->irq, wl);
/* Reached on success and on failure alike. */
5872 release_firmware(fw);
5873 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry called by the chip-family drivers.
 *
 * Validates that the lower driver filled in ops/ptable, records the device,
 * and kicks off an asynchronous NVS firmware request.  The real device
 * bring-up continues in the request's completion callback (presumably
 * wlcore_nvs_cb — the callback argument line is elided here; verify).
 * On request failure, nvs_loading_complete is completed so wlcore_remove()
 * never blocks on it.
 */
5876 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5880 if (!wl->ops || !wl->ptable)
5883 wl->dev = &pdev->dev;
5885 platform_set_drvdata(pdev, wl);
5887 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5888 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5891 wl1271_error("request_firmware_nowait failed: %d", ret);
5892 complete_all(&wl->nvs_loading_complete);
5897 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * wlcore_remove - common remove entry called by the chip-family drivers.
 *
 * Waits for the async NVS/bring-up path to finish, then undoes only what
 * actually completed: if initialization never succeeded, nothing below is
 * torn down; otherwise wakeup is disabled and the hw/IRQ are released.
 */
5899 int __devexit wlcore_remove(struct platform_device *pdev)
5901 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Serialize against wlcore_nvs_cb(), which completes this at its end. */
5903 wait_for_completion(&wl->nvs_loading_complete);
5904 if (!wl->initialized)
5907 if (wl->irq_wake_enabled) {
5908 device_init_wakeup(wl->dev, 0);
5909 disable_irq_wake(wl->irq);
5911 wl1271_unregister_hw(wl);
5912 free_irq(wl->irq, wl);
5917 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and module metadata. */

/* Debug bitmask, exported so the chip-family modules share one level.
 * Writable at runtime via sysfs (S_IWUSR). */
5919 u32 wl12xx_debug_level = DEBUG_NONE;
5920 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5921 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5922 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; load-time only (perm 0 — not visible in sysfs). */
5924 module_param_named(fwlog, fwlog_param, charp, 0);
5925 MODULE_PARM_DESC(fwlog,
5926 "FW logger options: continuous, ondemand, dbgpins or disable");
/* Debug aids for firmware recovery behaviour. */
5928 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5929 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5931 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5932 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5934 MODULE_LICENSE("GPL");
5935 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5936 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
/* Lets userspace firmware tooling know the NVS image this module wants. */
5937 MODULE_FIRMWARE(WL12XX_NVS_NAME);