/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"
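
/* HZ is one second's worth of jiffies, so these waits are 1s and 2s. */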
#define MVM_UCODE_ALIVE_TIMEOUT        HZ
#define MVM_UCODE_CALIB_TIMEOUT        (2*HZ)
#define UCODE_VALID_OK        cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
        bool valid;
        u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
        if (ucode_type >= IWL_UCODE_TYPE_MAX)
                return NULL;

        return &mvm->fw->img[ucode_type];
}
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
        struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
                .valid = cpu_to_le32(valid_tx_ant),
        };

        IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
        return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
        int i;
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
                             IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
                             IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };

        /* Do not direct RSS traffic to Q 0 which is our fallback queue */
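        /*
         * Illustrative mapping (assumed values, not read from hardware):
         * with num_rx_queues == 4 the entries below repeat 1, 2, 3, 1, ...
         * so every queue except fallback queue 0 receives RSS traffic.
         */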
        for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
                cmd.indirection_table[i] =
                        1 + (i % (mvm->trans->num_rx_queues - 1));
        netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

        return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
        int i;

        if (!mvm->fw_paging_db[0].fw_paging_block)
                return;

        for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
                if (!mvm->fw_paging_db[i].fw_paging_block) {
                        IWL_DEBUG_FW(mvm,
                                     "Paging: block %d already freed, continue to next page\n",
                                     i);

                        continue;
                }

                __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;

        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
        int sec_idx, idx;
        u32 offset = 0;

        /*
         * Find the paging image start point: if CPU2 exists and is in
         * paging format, the image looks like this:
         * CPU1 sections (2 or more)
         * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
         * CPU2 sections (not paged)
         * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
         *        CPU2 sections from the CPU2 paging sections
         * CPU2 paging CSS
         * CPU2 paging image (including instruction and data)
         */
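        /*
         * A hypothetical layout, for illustration only: sec[0..1] CPU1,
         * sec[2] CPU1_CPU2_SEPARATOR_SECTION, sec[3..4] CPU2 (not paged),
         * sec[5] PAGING_SEPARATOR_SECTION, sec[6] CSS, sec[7] paging data.
         */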
        for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
                if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                        sec_idx++;
                        break;
                }
        }

        /*
         * If paging is enabled there should be at least 2 more sections left
         * (one for the CSS and one for the paging data)
         */
        if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
                IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }
        /* copy the CSS block to the dram */
        IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
                     sec_idx);

        memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
               image->sec[sec_idx].data,
               mvm->fw_paging_db[0].fw_paging_size);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d CSS bytes to first block\n",
                     mvm->fw_paging_db[0].fw_paging_size);

        sec_idx++;

        /*
         * Copy the paging blocks to the dram.
         * The loop index starts from 1 because the CSS block has already
         * been copied to dram and occupies index 0.
         * The loop stops at num_of_paging_blk because the last block may
         * not be full; it is copied separately below.
         */
        for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
                memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       mvm->fw_paging_db[idx].fw_paging_size);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d paging bytes to block %d\n",
                             mvm->fw_paging_db[idx].fw_paging_size,
                             idx);

                offset += mvm->fw_paging_db[idx].fw_paging_size;
        }

        /* copy the last paging block */
        if (mvm->num_of_pages_in_last_blk > 0) {
                memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d pages in the last block %d\n",
                             mvm->num_of_pages_in_last_blk, idx);
        }

        return 0;
}
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                   const struct fw_img *image)
{
        struct page *block;
        dma_addr_t phys = 0;
        int blk_idx = 0;
        int order, num_of_pages;
        int dma_enabled;

        if (mvm->fw_paging_db[0].fw_paging_block)
                return 0;

        dma_enabled = is_device_dma_capable(mvm->trans->dev);

        /* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
        BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

        num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
        mvm->num_of_paging_blk = ((num_of_pages - 1) /
                                    NUM_OF_PAGE_PER_GROUP) + 1;

        mvm->num_of_pages_in_last_blk =
                num_of_pages -
                NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
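        /*
         * The two assignments above are a ceiling division and its
         * remainder; the first is equivalent to DIV_ROUND_UP(num_of_pages,
         * NUM_OF_PAGE_PER_GROUP) for a non-zero page count. Worked example
         * with illustrative numbers: a 340KB paging_mem_size yields 85
         * 4KB pages, i.e. 11 blocks of 8 pages with 5 pages in the last.
         */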
265 "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
266 mvm->num_of_paging_blk,
267 mvm->num_of_pages_in_last_blk);
269 /* allocate block of 4Kbytes for paging CSS */
270 order = get_order(FW_PAGING_SIZE);
271 block = alloc_pages(GFP_KERNEL, order);
273 /* free all the previous pages since we failed */
274 iwl_free_fw_paging(mvm);
278 mvm->fw_paging_db[blk_idx].fw_paging_block = block;
279 mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
282 phys = dma_map_page(mvm->trans->dev, block, 0,
283 PAGE_SIZE << order, DMA_BIDIRECTIONAL);
284 if (dma_mapping_error(mvm->trans->dev, phys)) {
286 * free the previous pages and the current one since
287 * we failed to map_page.
289 iwl_free_fw_paging(mvm);
292 mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
294 mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
295 blk_idx << BLOCK_2_EXP_SIZE;
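                /*
                 * Without DMA this is not a bus address: PAGING_ADDR_SIG
                 * marks the value as fake, and the block index shifted by
                 * BLOCK_2_EXP_SIZE (log2 of the block size) stands in for
                 * the block's offset.
                 */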
299 "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
303 * allocate blocks in dram.
304 * since that CSS allocated in fw_paging_db[0] loop start from index 1
306 for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
307 /* allocate block of PAGING_BLOCK_SIZE (32K) */
308 order = get_order(PAGING_BLOCK_SIZE);
309 block = alloc_pages(GFP_KERNEL, order);
311 /* free all the previous pages since we failed */
312 iwl_free_fw_paging(mvm);
316 mvm->fw_paging_db[blk_idx].fw_paging_block = block;
317 mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
320 phys = dma_map_page(mvm->trans->dev, block, 0,
323 if (dma_mapping_error(mvm->trans->dev, phys)) {
325 * free the previous pages and the current one
326 * since we failed to map_page.
328 iwl_free_fw_paging(mvm);
331 mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
333 mvm->fw_paging_db[blk_idx].fw_paging_phys =
335 blk_idx << BLOCK_2_EXP_SIZE;
339 "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
                              const struct fw_img *fw)
{
        int ret;

        ret = iwl_alloc_fw_paging_mem(mvm, fw);
        if (ret)
                return ret;

        return iwl_fill_paging_mem(mvm, fw);
}
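
/*
 * Summary of what FW_PAGING_BLOCK_CMD carries, as built below: the
 * secured/enabled flags plus the page count of the last block, the block
 * size as a power of two (BLOCK_2_EXP_SIZE), the number of blocks, and one
 * address per block, pre-shifted by PAGE_2_EXP_SIZE, i.e. in 4K page units.
 */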
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
        int blk_idx;
        __le32 dev_phy_addr;
        struct iwl_fw_paging_cmd fw_paging_cmd = {
                .flags =
                        cpu_to_le32(PAGING_CMD_IS_SECURED |
                                    PAGING_CMD_IS_ENABLED |
                                    (mvm->num_of_pages_in_last_blk <<
                                    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
                .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
                .block_num = cpu_to_le32(mvm->num_of_paging_blk),
        };

        /* loop over all paging blocks + the CSS block */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                dev_phy_addr =
                        cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
                                    PAGE_2_EXP_SIZE);
                fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
        }

        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
                                    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
        int ret;
        struct iwl_fw_get_item_cmd fw_get_item_cmd = {
                .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
        };

        struct iwl_fw_get_item_resp *item_resp;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &fw_get_item_cmd, },
        };

        cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm,
                        "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
                        ret);
                return ret;
        }

        item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
        if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
                IWL_ERR(mvm,
                        "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
                        le32_to_cpu(item_resp->item_id));
                ret = -EIO;
                goto exit;
        }

        /* Add an extra page for headers */
        mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
                                                  FW_PAGING_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
                goto exit;
        }
        mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
        mvm->trans->paging_db = mvm->fw_paging_db;
        IWL_DEBUG_FW(mvm,
                     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
                     mvm->trans->paging_req_addr);

exit:
        iwl_free_resp(&cmd);

        return ret;
}
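
/*
 * Note that iwl_alive_fn() below has no explicit version field to look at:
 * the alive notification version is inferred purely from the payload
 * length, which is why each branch compares against a sizeof().
 */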
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
        struct mvm_alive_resp_ver1 *palive1;
        struct mvm_alive_resp_ver2 *palive2;
        struct mvm_alive_resp *palive;

        if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                palive1 = (void *)pkt->data;

                mvm->support_umac_log = false;
                mvm->error_event_table =
                        le32_to_cpu(palive1->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

                alive_data->valid = le16_to_cpu(palive1->status) ==
                                    IWL_ALIVE_STATUS_OK;
                IWL_DEBUG_FW(mvm,
                             "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                palive2 = (void *)pkt->data;

                mvm->error_event_table =
                        le32_to_cpu(palive2->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
                mvm->umac_error_event_table =
                        le32_to_cpu(palive2->error_info_addr);
                mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
                mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

                alive_data->valid = le16_to_cpu(palive2->status) ==
                                    IWL_ALIVE_STATUS_OK;
                if (mvm->umac_error_event_table)
                        mvm->support_umac_log = true;

                IWL_DEBUG_FW(mvm,
                             "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive2->status), palive2->ver_type,
                             palive2->ver_subtype, palive2->flags);

                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             palive2->umac_major, palive2->umac_minor);
        } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                palive = (void *)pkt->data;

                mvm->error_event_table =
                        le32_to_cpu(palive->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
                mvm->umac_error_event_table =
                        le32_to_cpu(palive->error_info_addr);
                mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
                mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

                alive_data->valid = le16_to_cpu(palive->status) ==
                                    IWL_ALIVE_STATUS_OK;
                if (mvm->umac_error_event_table)
                        mvm->support_umac_log = true;

                IWL_DEBUG_FW(mvm,
                             "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive->status), palive->ver_type,
                             palive->ver_subtype, palive->flags);

                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             le32_to_cpu(palive->umac_major),
                             le32_to_cpu(palive->umac_minor));
        }

        return true;
}
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
                                  struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_phy_db *phy_db = data;

        if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
                WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
                return true;
        }

        WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
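        /*
         * A false return keeps the notification wait running: only an
         * INIT_COMPLETE_NOTIF packet (handled above) ends it by
         * returning true.
         */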
        return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
{
        struct iwl_notification_wait alive_wait;
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;

        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
            !(fw_has_capa(&mvm->fw->ucode_capa,
                          IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
                fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
        mvm->cur_ucode = ucode_type;
        mvm->ucode_loaded = false;

        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
                                   alive_cmd, ARRAY_SIZE(alive_cmd),
                                   iwl_alive_fn, &alive_data);
        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
                mvm->cur_ucode = old_type;
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
                if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
                                iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }
        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
                mvm->cur_ucode = old_type;
                return -EIO;
        }

        /*
         * update the sdio allocation according to the pointer we get in the
         * alive notification.
         */
        st_fwrd_space.addr = mvm->sf_space.addr;
        st_fwrd_space.size = mvm->sf_space.size;
        ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
        if (ret) {
                IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
                return ret;
        }

        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

        /*
         * Configure and operate the fw paging mechanism.
         * The driver configures the paging flow only once; the CPU2 paging
         * image is included in the IWL_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                /*
                 * When dma is not enabled, the driver needs to copy / write
                 * the downloaded / uploaded page to / from the smem.
                 * This gets the location of the place where the pages are
                 * stored.
                 */
                if (!is_device_dma_capable(mvm->trans->dev)) {
                        ret = iwl_trans_get_paging_item(mvm);
                        if (ret) {
                                IWL_ERR(mvm, "failed to get FW paging item\n");
                                return ret;
                        }
                }

                ret = iwl_save_fw_paging(mvm, fw);
                if (ret) {
                        IWL_ERR(mvm, "failed to save the FW paging image\n");
                        return ret;
                }

                ret = iwl_send_paging_cmd(mvm, fw);
                if (ret) {
                        IWL_ERR(mvm, "failed to send the paging cmd\n");
                        iwl_free_fw_paging(mvm);
                        return ret;
                }
        }

        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
         * could be stopped, so wake them up. In firmware restart,
         * mac80211 will have the queues stopped as well until the
         * reconfiguration completes. During normal startup, they
         * aren't stopped.
         */
        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
        if (iwl_mvm_is_dqa_supported(mvm))
                mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
        else
                mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

        mvm->ucode_loaded = true;

        return 0;
}
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;

        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);

        return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait calib_wait;
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON_ONCE(mvm->calibrating))
                return 0;

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_phy_db_entry,
                                   mvm->phy_db);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
                IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
                goto error;
        }

        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;

        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        /* In case we read the NVM from external file, load it to the NIC */
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);

        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);

        /*
         * Abort after reading the NVM in case RF Kill is on; we will
         * complete the init sequence later, when RF kill switches off.
         */
        if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
                ret = 1;
                goto out;
        }

        mvm->calibrating = true;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /*
         * Send the phy configurations command to the init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
                        ret);
                goto error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the calibration complete notification.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                                    MVM_UCODE_CALIB_TIMEOUT);

        if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
        goto out;

error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
        mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
                                        sizeof(struct ieee80211_channel) +
                                        sizeof(struct ieee80211_rate),
                                        GFP_KERNEL);
                if (!mvm->nvm_data)
                        return -ENOMEM;
                mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
                mvm->nvm_data->bands[0].bitrates =
                        (void *)mvm->nvm_data->channels + 1;
                mvm->nvm_data->bands[0].bitrates->hw_value = 10;
        }

        return ret;
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
        struct iwl_host_cmd cmd = {
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
        struct iwl_shared_mem_cfg *mem_cfg;
        struct iwl_rx_packet *pkt;
        u32 i;

        lockdep_assert_held(&mvm->mutex);

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
                cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
        else
                cmd.id = SHARED_MEM_CFG;

        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;

        pkt = cmd.resp_pkt;
        mem_cfg = (void *)pkt->data;

        mvm->shared_mem_cfg.shared_mem_addr =
                le32_to_cpu(mem_cfg->shared_mem_addr);
        mvm->shared_mem_cfg.shared_mem_size =
                le32_to_cpu(mem_cfg->shared_mem_size);
        mvm->shared_mem_cfg.sample_buff_addr =
                le32_to_cpu(mem_cfg->sample_buff_addr);
        mvm->shared_mem_cfg.sample_buff_size =
                le32_to_cpu(mem_cfg->sample_buff_size);
        mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
                mvm->shared_mem_cfg.txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
                mvm->shared_mem_cfg.rxfifo_size[i] =
                        le32_to_cpu(mem_cfg->rxfifo_size[i]);
        mvm->shared_mem_cfg.page_buff_addr =
                le32_to_cpu(mem_cfg->page_buff_addr);
        mvm->shared_mem_cfg.page_buff_size =
                le32_to_cpu(mem_cfg->page_buff_size);

        /* new API has more data */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                mvm->shared_mem_cfg.rxfifo_addr =
                        le32_to_cpu(mem_cfg->rxfifo_addr);
                mvm->shared_mem_cfg.internal_txfifo_addr =
                        le32_to_cpu(mem_cfg->internal_txfifo_addr);

                BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
                             sizeof(mem_cfg->internal_txfifo_size));

                for (i = 0;
                     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
                     i++)
                        mvm->shared_mem_cfg.internal_txfifo_size[i] =
                                le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
        }

        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

        iwl_free_resp(&cmd);
}
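
/*
 * LTR here is PCIe Latency Tolerance Reporting; when the transport has it
 * enabled, this command hands its management over to the firmware.
 */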
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
        struct iwl_ltr_config_cmd cmd = {
                .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
        };

        if (!mvm->trans->ltr_enabled)
                return 0;

        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
                                    sizeof(cmd), &cmd);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
        int ret, i;
        struct ieee80211_channel *chan;
        struct cfg80211_chan_def chandef;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_trans_start_hw(mvm->trans);
        if (ret)
                return ret;

        /*
         * If we haven't completed the run of the init ucode during
         * module loading, load the init ucode now
         * (for example, if we were in RFKILL)
         */
        ret = iwl_run_init_mvm_ucode(mvm, false);
        if (ret && !iwlmvm_mod_params.init_dbg) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
                /* this can't happen */
                if (WARN_ON(ret > 0))
                        ret = -ERFKILL;
                goto error;
        }
        if (!iwlmvm_mod_params.init_dbg) {
                /*
                 * Stop and start the transport without entering low power
                 * mode. This will save the state of other components on the
                 * device that are triggered by the INIT firmware (MFUART).
                 */
                _iwl_trans_stop_device(mvm->trans, false);
                ret = _iwl_trans_start_hw(mvm->trans, false);
                if (ret)
                        goto error;
        }

        if (iwlmvm_mod_params.init_dbg)
                return 0;

        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
                goto error;
        }

        iwl_mvm_get_shared_mem_conf(mvm);

        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

        mvm->fw_dbg_conf = FW_DBG_INVALID;
        /* if we have a destination, assume EARLY START */
        if (mvm->fw->dbg_dest_tlv)
                mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
        iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;

        /* Send phy db control command and then phy db calibration */
        ret = iwl_send_phy_db_data(mvm->phy_db);
        if (ret)
                goto error;

        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret)
                goto error;

        /* Init RSS configuration */
        if (iwl_mvm_has_new_rx_api(mvm)) {
                ret = iwl_send_rss_cfg_cmd(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
                                ret);
                        goto error;
                }
        }

        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

        mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

        /* reset quota debouncing buffer - 0xff will yield invalid data */
        memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
        if (ret)
                goto error;

        /* Add all the PHY contexts */
        chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
        cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
        for (i = 0; i < NUM_PHY_CTX; i++) {
                /*
                 * The channel used here isn't relevant as it's
                 * going to be overwritten in the other flows.
                 * For now use the first channel we have.
                 */
                ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
                                           &chandef, 1, 1);
                if (ret)
                        goto error;
        }

#ifdef CONFIG_THERMAL
        if (iwl_mvm_is_tt_in_fw(mvm)) {
                /*
                 * In order to give the FW responsibility for ct-kill and
                 * TX backoff, we need to send an empty temperature
                 * reporting cmd during init time.
                 */
                iwl_mvm_send_temp_report_ths_cmd(mvm);
        } else {
                /* Initialize tx backoffs to the minimal possible */
                iwl_mvm_tt_tx_backoff(mvm, 0);
        }

        /* TODO: read the budget from BIOS / Platform NVM */
        if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
                ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
                                           mvm->cooling_dev.cur_state);
#else
        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

        WARN_ON(iwl_mvm_config_ltr(mvm));

        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;

        /*
         * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
         * anyway, so don't init MCC.
         */
        if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
                ret = iwl_mvm_init_mcc(mvm);
                if (ret)
                        goto error;
        }

        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
                        goto error;
        }

        if (iwl_mvm_is_csum_supported(mvm) &&
            mvm->cfg->features & NETIF_F_RXCSUM)
                iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
error:
        iwl_mvm_stop_device(mvm);
        return ret;
}
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
        int ret, i;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_trans_start_hw(mvm->trans);
        if (ret)
                return ret;

        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
        if (ret) {
                IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
                goto error;
        }

        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /* Send phy db control command and then phy db calibration */
        ret = iwl_send_phy_db_data(mvm->phy_db);
        if (ret)
                goto error;

        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret)
                goto error;

        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
        if (ret)
                goto error;

        return 0;
error:
        iwl_mvm_stop_device(mvm);
        return ret;
}
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
        u32 flags = le32_to_cpu(card_state_notif->flags);

        IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
                          (flags & HW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & SW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & CT_KILL_CARD_DISABLED) ?
                          "Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

        IWL_DEBUG_INFO(mvm,
                       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
                       le32_to_cpu(mfuart_notif->installed_ver),
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
}