/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT HZ
#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)

#define UCODE_VALID_OK  cpu_to_le32(0x1)

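/* Results extracted from the ALIVE notification, filled in by iwl_alive_fn() */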
struct iwl_mvm_alive_data {
        bool valid;
        u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
        struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
                .valid = cpu_to_le32(valid_tx_ant),
        };

        IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
        return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
        int i;
        struct iwl_rss_config_cmd cmd = {
                .flags = cpu_to_le32(IWL_RSS_ENABLE),
                .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
                             IWL_RSS_HASH_TYPE_IPV4_UDP |
                             IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
                             IWL_RSS_HASH_TYPE_IPV6_TCP |
                             IWL_RSS_HASH_TYPE_IPV6_UDP |
                             IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
        };

        if (mvm->trans->num_rx_queues == 1)
                return 0;

        /* Do not direct RSS traffic to Q 0 which is our fallback queue */
        for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
                cmd.indirection_table[i] =
                        1 + (i % (mvm->trans->num_rx_queues - 1));
        netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

        return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
        struct iwl_dqa_enable_cmd dqa_cmd = {
                .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
        };
        u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
        int ret;

        ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
        else
                IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

        return ret;
}

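/* Free the paging blocks allocated by iwl_alloc_fw_paging_mem() and clear
 * the paging bookkeeping kept in the transport.
 */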
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
        int i;

        if (!mvm->fw_paging_db[0].fw_paging_block)
                return;

        for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
                struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

                if (!paging->fw_paging_block) {
                        IWL_DEBUG_FW(mvm,
                                     "Paging: block %d already freed, continue to next page\n",
                                     i);

                        continue;
                }
                dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
                               paging->fw_paging_size, DMA_BIDIRECTIONAL);

                __free_pages(paging->fw_paging_block,
                             get_order(paging->fw_paging_size));
                paging->fw_paging_block = NULL;
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;
        mvm->trans->paging_db = NULL;

        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
        int sec_idx, idx;
        u32 offset = 0;

        /*
         * Find where the paging image starts:
         * if CPU2 exists and is in paging format, the image looks like this:
         * CPU1 sections (2 or more)
         * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
         * CPU2 sections (not paged)
         * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
         * non-paged sections from the CPU2 paging section
         * CPU2 paging CSS
         * CPU2 paging image (including instruction and data)
         */
        for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
                if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                        sec_idx++;
                        break;
                }
        }

        /*
         * If paging is enabled there should be at least 2 more sections left
         * (one for CSS and one for Paging data)
         */
        if (sec_idx >= image->num_sec - 1) {
                IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }

        /* copy the CSS block to DRAM */
        IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
                     sec_idx);

        memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
               image->sec[sec_idx].data,
               mvm->fw_paging_db[0].fw_paging_size);
        dma_sync_single_for_device(mvm->trans->dev,
                                   mvm->fw_paging_db[0].fw_paging_phys,
                                   mvm->fw_paging_db[0].fw_paging_size,
                                   DMA_BIDIRECTIONAL);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d CSS bytes to first block\n",
                     mvm->fw_paging_db[0].fw_paging_size);

        sec_idx++;

        /*
         * Copy the paging blocks to DRAM.
         * The loop index starts from 1 because the CSS block was already
         * copied above and sits at index 0.  The loop stops before the
         * last block, which is not necessarily full and is copied
         * separately below.
         */
        for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
                struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

                memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       block->fw_paging_size);
                dma_sync_single_for_device(mvm->trans->dev,
                                           block->fw_paging_phys,
                                           block->fw_paging_size,
                                           DMA_BIDIRECTIONAL);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d paging bytes to block %d\n",
                             mvm->fw_paging_db[idx].fw_paging_size,
                             idx);

                offset += mvm->fw_paging_db[idx].fw_paging_size;
        }

        /* copy the last paging block */
        if (mvm->num_of_pages_in_last_blk > 0) {
                struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

                memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
                dma_sync_single_for_device(mvm->trans->dev,
                                           block->fw_paging_phys,
                                           block->fw_paging_size,
                                           DMA_BIDIRECTIONAL);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d pages in the last block %d\n",
                             mvm->num_of_pages_in_last_blk, idx);
        }

        return 0;
}

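/* Log the MFUART assert id and dump data carried by the MFUART assert
 * dump notification from the firmware.
 */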
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
                                   struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
        __le32 *dump_data = mfu_dump_notif->data;
        int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
        int i;

        if (mfu_dump_notif->index_num == 0)
                IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
                         le32_to_cpu(mfu_dump_notif->assert_id));

        for (i = 0; i < n_words; i++)
                IWL_DEBUG_INFO(mvm,
                               "MFUART assert dump, dword %u: 0x%08x\n",
                               le16_to_cpu(mfu_dump_notif->index_num) *
                               n_words + i,
                               le32_to_cpu(dump_data[i]));
}

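/* Allocate a 4KB block for the paging CSS and 32KB blocks for the paging
 * data, DMA-mapping each block when the device is DMA capable.
 */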
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                   const struct fw_img *image)
{
        struct page *block;
        dma_addr_t phys = 0;
        int blk_idx, order, num_of_pages, size, dma_enabled;

        if (mvm->fw_paging_db[0].fw_paging_block)
                return 0;

        dma_enabled = is_device_dma_capable(mvm->trans->dev);

        /* ensure that BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
        BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

        num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
        mvm->num_of_paging_blk =
                DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
        mvm->num_of_pages_in_last_blk =
                num_of_pages -
                NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

        IWL_DEBUG_FW(mvm,
                     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
                     mvm->num_of_paging_blk,
                     mvm->num_of_pages_in_last_blk);

        /* Allocate the CSS and paging blocks in DRAM */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
                size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
                order = get_order(size);
                block = alloc_pages(GFP_KERNEL, order);
                if (!block) {
                        /* free all the previous pages since we failed */
                        iwl_free_fw_paging(mvm);
                        return -ENOMEM;
                }

                mvm->fw_paging_db[blk_idx].fw_paging_block = block;
                mvm->fw_paging_db[blk_idx].fw_paging_size = size;

                if (dma_enabled) {
                        phys = dma_map_page(mvm->trans->dev, block, 0,
                                            PAGE_SIZE << order,
                                            DMA_BIDIRECTIONAL);
                        if (dma_mapping_error(mvm->trans->dev, phys)) {
                                /*
                                 * free the previous pages and the current one
                                 * since we failed to map_page.
                                 */
                                iwl_free_fw_paging(mvm);
                                return -ENOMEM;
                        }
                        mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
                } else {
                        mvm->fw_paging_db[blk_idx].fw_paging_phys =
                                PAGING_ADDR_SIG |
                                blk_idx << BLOCK_2_EXP_SIZE;
                }

                if (!blk_idx)
                        IWL_DEBUG_FW(mvm,
                                     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
                                     order);
                else
                        IWL_DEBUG_FW(mvm,
                                     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
                                     order);
        }

        return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
                              const struct fw_img *fw)
{
        int ret;

        ret = iwl_alloc_fw_paging_mem(mvm, fw);
        if (ret)
                return ret;

        return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
        struct iwl_fw_paging_cmd paging_cmd = {
                .flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
                                     PAGING_CMD_IS_ENABLED |
                                     (mvm->num_of_pages_in_last_blk <<
                                      PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
                .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
                .block_num = cpu_to_le32(mvm->num_of_paging_blk),
        };
        int blk_idx;

        /* loop over all the paging blocks + the CSS block */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
                __le32 phy_addr;

                addr = addr >> PAGE_2_EXP_SIZE;
                phy_addr = cpu_to_le32(addr);
                paging_cmd.device_phy_addr[blk_idx] = phy_addr;
        }

        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
                                    0, sizeof(paging_cmd), &paging_cmd);
}

/*
 * Retrieve the paging item from the FW: the SMEM address that the pages
 * are copied to / from when the device is not DMA capable.
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
        int ret;
        struct iwl_fw_get_item_cmd fw_get_item_cmd = {
                .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
        };

        struct iwl_fw_get_item_resp *item_resp;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &fw_get_item_cmd, },
        };

        cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm,
                        "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
                        ret);
                return ret;
        }

        item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
        if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
                IWL_ERR(mvm,
                        "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
                        le32_to_cpu(item_resp->item_id));
                ret = -EIO;
                goto exit;
        }

        /* Add an extra page for headers */
        mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
                                                  FW_PAGING_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
                goto exit;
        }
        mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
        mvm->trans->paging_db = mvm->fw_paging_db;
        IWL_DEBUG_FW(mvm,
                     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
                     mvm->trans->paging_req_addr);

exit:
        iwl_free_resp(&cmd);

        return ret;
}

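/* Notification-wait callback: parse the ALIVE notification (v3 or the
 * larger CDB format) and record the firmware's error/log table pointers.
 */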
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
        struct mvm_alive_resp_v3 *palive3;
        struct mvm_alive_resp *palive;
        struct iwl_umac_alive *umac;
        struct iwl_lmac_alive *lmac1;
        struct iwl_lmac_alive *lmac2 = NULL;
        u16 status;

        if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                palive = (void *)pkt->data;
                umac = &palive->umac_data;
                lmac1 = &palive->lmac_data[0];
                lmac2 = &palive->lmac_data[1];
                status = le16_to_cpu(palive->status);
        } else {
                palive3 = (void *)pkt->data;
                umac = &palive3->umac_data;
                lmac1 = &palive3->lmac_data;
                status = le16_to_cpu(palive3->status);
        }

        mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
        if (lmac2)
                mvm->error_event_table[1] =
                        le32_to_cpu(lmac2->error_event_table_ptr);
        mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
        mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
        mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);

        mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);

        alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
        alive_data->valid = status == IWL_ALIVE_STATUS_OK;
        if (mvm->umac_error_event_table)
                mvm->support_umac_log = true;

        IWL_DEBUG_FW(mvm,
                     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
                     status, lmac1->ver_type, lmac1->ver_subtype);

        if (lmac2)
                IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

        IWL_DEBUG_FW(mvm,
                     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                     le32_to_cpu(umac->umac_major),
                     le32_to_cpu(umac->umac_minor));

        return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
                                   struct iwl_rx_packet *pkt, void *data)
{
        WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

        return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
                                  struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_phy_db *phy_db = data;

        if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
                WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
                return true;
        }

        WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

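        /* keep waiting until the INIT complete notification arrives */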
        return false;
}

static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
        const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
        int ret;

        /*
         * Configure and operate fw paging mechanism.
         * The driver configures the paging flow only once.
         * The CPU2 paging image is included in the IWL_UCODE_INIT image.
         */
        if (!fw->paging_mem_size)
                return 0;

        /*
         * When DMA is not enabled, the driver needs to copy / write
         * the downloaded / uploaded page to / from SMEM.
         * This gets the location where the pages are stored.
         */
        if (!is_device_dma_capable(mvm->trans->dev)) {
                ret = iwl_trans_get_paging_item(mvm);
                if (ret) {
                        IWL_ERR(mvm, "failed to get FW paging item\n");
                        return ret;
                }
        }

        ret = iwl_save_fw_paging(mvm, fw);
        if (ret) {
                IWL_ERR(mvm, "failed to save the FW paging image\n");
                return ret;
        }

        ret = iwl_send_paging_cmd(mvm, fw);
        if (ret) {
                IWL_ERR(mvm, "failed to send the paging cmd\n");
                iwl_free_fw_paging(mvm);
                return ret;
        }

        return 0;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
{
        struct iwl_notification_wait alive_wait;
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;

        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
            !(fw_has_capa(&mvm->fw->ucode_capa,
                          IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
                fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm->fw, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
        mvm->cur_ucode = ucode_type;
        clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
                                   alive_cmd, ARRAY_SIZE(alive_cmd),
                                   iwl_alive_fn, &alive_data);

        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
                mvm->cur_ucode = old_type;
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
                struct iwl_trans *trans = mvm->trans;

                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
                                iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
                else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(trans, SB_CPU_1_STATUS),
                                iwl_read_prph(trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }

        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
                mvm->cur_ucode = old_type;
                return -EIO;
        }

        /*
         * update the sdio allocation according to the pointer we get in the
         * alive notification.
         */
        st_fwrd_space.addr = mvm->sf_space.addr;
        st_fwrd_space.size = mvm->sf_space.size;
        ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
        if (ret) {
                IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
                return ret;
        }

        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
         * could be stopped, so wake them up. In firmware restart,
         * mac80211 will have the queues stopped as well until the
         * reconfiguration completes. During normal startup, they
         * will be empty.
         */

        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
        if (iwl_mvm_is_dqa_supported(mvm))
                mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
        else
                mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

        set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

        return 0;
}

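/* Load the regular (unified) firmware image, complete the NVM access
 * handshake and wait for the INIT complete notification; used on devices
 * with the new TX API instead of a separate INIT image.
 */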
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait init_wait;
        struct iwl_nvm_access_complete_cmd nvm_complete = {};
        struct iwl_init_extended_cfg_cmd init_cfg = {
                .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
        };
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &init_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_init_complete,
                                   NULL);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
                goto error;
        }

        /* Send init config command to mark that we are sending NVM access
         * commands
         */
        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
                                                INIT_EXTENDED_CFG_CMD), 0,
                                   sizeof(init_cfg), &init_cfg);
        if (ret) {
                IWL_ERR(mvm, "Failed to run init config command: %d\n",
                        ret);
                goto error;
        }

        /* Load NVM to NIC if needed */
        if (mvm->nvm_file_name) {
                iwl_mvm_read_external_nvm(mvm);
                iwl_mvm_load_nvm_to_nic(mvm);
        }

        if (IWL_MVM_PARSE_NVM && read_nvm) {
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                NVM_ACCESS_COMPLETE), 0,
                                   sizeof(nvm_complete), &nvm_complete);
        if (ret) {
                IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
                        ret);
                goto error;
        }

        /* We wait for the INIT complete notification */
        ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret)
                return ret;

        /* Read the NVM only at driver load time, no need to do this twice */
        if (!IWL_MVM_PARSE_NVM && read_nvm) {
                ret = iwl_mvm_nvm_get_from_fw(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        return ret;
                }
        }

        return 0;

error:
        iwl_remove_notification(&mvm->notif_wait, &init_wait);
        return ret;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;

        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);

        return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait calib_wait;
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_run_unified_mvm_ucode(mvm, true);

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON_ONCE(mvm->calibrating))
                return 0;

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_phy_db_entry,
                                   mvm->phy_db);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
                IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
                goto error;
        }

        if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
                ret = iwl_mvm_send_bt_init_conf(mvm);
                if (ret)
                        goto error;
        }

        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        /* In case we read the NVM from external file, load it to the NIC */
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);

        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);

        /*
         * Abort after reading the NVM in case RF Kill is on; we will
         * complete the init sequence later, when RF kill switches off.
         */
        if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
                ret = 1;
                goto out;
        }

        mvm->calibrating = true;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /*
         * Send the PHY configuration command to the init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
                        ret);
                goto error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the calibration complete notification.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);

        if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
        goto out;

error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
        mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
                                        sizeof(struct ieee80211_channel) +
                                        sizeof(struct ieee80211_rate),
                                        GFP_KERNEL);
                if (!mvm->nvm_data)
                        return -ENOMEM;
                mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
                mvm->nvm_data->bands[0].bitrates =
                        (void *)mvm->nvm_data->channels + 1;
                mvm->nvm_data->bands[0].bitrates->hw_value = 10;
        }

        return ret;
}

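/* Parse the shared memory configuration notification in its a000 layout,
 * which carries a per-LMAC SMEM configuration.
 */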
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
                                          struct iwl_rx_packet *pkt)
{
        struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
        int i, lmac;
        int lmac_num = le32_to_cpu(mem_cfg->lmac_num);

        if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
                return;

        mvm->smem_cfg.num_lmacs = lmac_num;
        mvm->smem_cfg.num_txfifo_entries =
                ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
        mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);

        for (lmac = 0; lmac < lmac_num; lmac++) {
                struct iwl_shared_mem_lmac_cfg *lmac_cfg =
                        &mem_cfg->lmac_smem[lmac];

                for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
                        mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
                                le32_to_cpu(lmac_cfg->txfifo_size[i]);
                mvm->smem_cfg.lmac[lmac].rxfifo1_size =
                        le32_to_cpu(lmac_cfg->rxfifo1_size);
        }
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
{
        struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
        int i;

        mvm->smem_cfg.num_lmacs = 1;

        mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
        for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
                mvm->smem_cfg.lmac[0].txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);

        mvm->smem_cfg.lmac[0].rxfifo1_size =
                le32_to_cpu(mem_cfg->rxfifo_size[0]);
        mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);

        /* new API has more data, from rxfifo_addr field and on */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
                             sizeof(mem_cfg->internal_txfifo_size));

                for (i = 0;
                     i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
                     i++)
                        mvm->smem_cfg.internal_txfifo_size[i] =
                                le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
        }
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
        struct iwl_host_cmd cmd = {
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
        struct iwl_rx_packet *pkt;

        lockdep_assert_held(&mvm->mutex);

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
                cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
        else
                cmd.id = SHARED_MEM_CFG;

        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;

        pkt = cmd.resp_pkt;
        if (iwl_mvm_has_new_tx_api(mvm))
                iwl_mvm_parse_shared_mem_a000(mvm, pkt);
        else
                iwl_mvm_parse_shared_mem(mvm, pkt);

        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

        iwl_free_resp(&cmd);
}

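/* Enable PCIe Latency Tolerance Reporting (LTR) in the firmware when the
 * transport reports that the platform has it enabled.
 */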
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
        struct iwl_ltr_config_cmd cmd = {
                .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
        };

        if (!mvm->trans->ltr_enabled)
                return 0;

        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
                                    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
#define ACPI_WRDS_METHOD                "WRDS"
#define ACPI_EWRD_METHOD                "EWRD"
#define ACPI_WGDS_METHOD                "WGDS"
#define ACPI_WIFI_DOMAIN                (0x07)
#define ACPI_WRDS_WIFI_DATA_SIZE        (IWL_MVM_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE        ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
                                         IWL_MVM_SAR_TABLE_SIZE + 3)
#define ACPI_WGDS_WIFI_DATA_SIZE        18
#define ACPI_WGDS_NUM_BANDS             2
#define ACPI_WGDS_TABLE_SIZE            3

static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
                                   union acpi_object *table,
                                   struct iwl_mvm_sar_profile *profile,
                                   bool enabled)
{
        int i;

        profile->enabled = enabled;

        for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
                if ((table[i].type != ACPI_TYPE_INTEGER) ||
                    (table[i].integer.value > U8_MAX))
                        return -EINVAL;

                profile->table[i] = table[i].integer.value;
        }

        return 0;
}

static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
                                                    union acpi_object *data,
                                                    int data_size)
{
        int i;
        union acpi_object *wifi_pkg;

        /*
         * We need at least two packages, one for the revision and one
         * for the data itself.  Also check that the revision is valid
         * (i.e. it is an integer set to 0).
         */
        if (data->type != ACPI_TYPE_PACKAGE ||
            data->package.count < 2 ||
            data->package.elements[0].type != ACPI_TYPE_INTEGER ||
            data->package.elements[0].integer.value != 0) {
                IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
                return ERR_PTR(-EINVAL);
        }

        /* loop through all the packages to find the one for WiFi */
        for (i = 1; i < data->package.count; i++) {
                union acpi_object *domain;

                wifi_pkg = &data->package.elements[i];

                /* Skip anything that is not a package with the right
                 * amount of elements (i.e. domain_type,
                 * enabled/disabled plus the actual data size).
                 */
                if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
                    wifi_pkg->package.count != data_size)
                        continue;

                domain = &wifi_pkg->package.elements[0];
                if (domain->type == ACPI_TYPE_INTEGER &&
                    domain->integer.value == ACPI_WIFI_DOMAIN)
                        break;

                wifi_pkg = NULL;
        }

        if (!wifi_pkg)
                return ERR_PTR(-ENOENT);

        return wifi_pkg;
}

static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
        union acpi_object *wifi_pkg, *table;
        acpi_handle root_handle;
        acpi_handle handle;
        struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
        bool enabled;
        int ret;

        root_handle = ACPI_HANDLE(mvm->dev);
        if (!root_handle) {
                IWL_DEBUG_RADIO(mvm,
                                "Could not retrieve root port ACPI handle\n");
                return -ENOENT;
        }

        /* Get the method's handle */
        status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
                                 &handle);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
                return -ENOENT;
        }

        /* Call WRDS with no arguments */
        status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
                return -ENOENT;
        }

        wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
                                             ACPI_WRDS_WIFI_DATA_SIZE);
        if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }

        if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
                ret = -EINVAL;
                goto out_free;
        }

        enabled = !!(wifi_pkg->package.elements[1].integer.value);

        /* position of the actual table */
        table = &wifi_pkg->package.elements[2];

        /* The profile from WRDS is officially profile 1, but goes
         * into sar_profiles[0] (because we don't have a profile 0).
         */
        ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
                                      enabled);

out_free:
        kfree(wrds.pointer);
        return ret;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
        union acpi_object *wifi_pkg;
        acpi_handle root_handle;
        acpi_handle handle;
        struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
        bool enabled;
        int i, n_profiles, pos, ret;

        root_handle = ACPI_HANDLE(mvm->dev);
        if (!root_handle) {
                IWL_DEBUG_RADIO(mvm,
                                "Could not retrieve root port ACPI handle\n");
                return -ENOENT;
        }

        /* Get the method's handle */
        status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
                                 &handle);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
                return -ENOENT;
        }

        /* Call EWRD with no arguments */
        status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
                return -ENOENT;
        }

        wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
                                             ACPI_EWRD_WIFI_DATA_SIZE);
        if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }

        if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
            (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
                ret = -EINVAL;
                goto out_free;
        }

        enabled = !!(wifi_pkg->package.elements[1].integer.value);
        n_profiles = wifi_pkg->package.elements[2].integer.value;

        /* in case of BIOS bug */
        if (n_profiles <= 0) {
                ret = -EINVAL;
                goto out_free;
        }

        /* the tables start at element 3; pos must be re-initialized on
         * every call, so it must not be static
         */
        pos = 3;

        for (i = 0; i < n_profiles; i++) {
                /* The EWRD profiles officially go from 2 to 4, but we
                 * save them in sar_profiles[1-3] (because we don't
                 * have profile 0).  So in the array we start from 1.
                 */
                ret = iwl_mvm_sar_set_profile(mvm,
                                              &wifi_pkg->package.elements[pos],
                                              &mvm->sar_profiles[i + 1],
                                              enabled);
                if (ret < 0)
                        break;

                /* go to the next table */
                pos += IWL_MVM_SAR_TABLE_SIZE;
        }

out_free:
        kfree(ewrd.pointer);
        return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
        union acpi_object *wifi_pkg;
        acpi_handle root_handle;
        acpi_handle handle;
        struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
        int i, j, ret;
        int idx = 1;

        root_handle = ACPI_HANDLE(mvm->dev);
        if (!root_handle) {
                IWL_DEBUG_RADIO(mvm,
                                "Could not retrieve root port ACPI handle\n");
                return -ENOENT;
        }

        /* Get the method's handle */
        status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
                                 &handle);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
                return -ENOENT;
        }

        /* Call WGDS with no arguments */
        status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
        if (ACPI_FAILURE(status)) {
                IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
                return -ENOENT;
        }

        wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
                                             ACPI_WGDS_WIFI_DATA_SIZE);
        if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }

        for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
                for (j = 0; j < IWL_MVM_GEO_TABLE_SIZE; j++) {
                        union acpi_object *entry;

                        entry = &wifi_pkg->package.elements[idx++];
                        if ((entry->type != ACPI_TYPE_INTEGER) ||
                            (entry->integer.value > U8_MAX)) {
                                ret = -EINVAL;
                                goto out_free;
                        }

                        mvm->geo_profiles[i].values[j] = entry->integer.value;
                }
        }
        ret = 0;
out_free:
        kfree(wgds.pointer);
        return ret;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
        struct iwl_dev_tx_power_cmd cmd = {
                .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
        };
        int i, j, idx;
        int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
        int len = sizeof(cmd);

        BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
        BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
                     IWL_MVM_SAR_TABLE_SIZE);

        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
                len = sizeof(cmd.v3);

        for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
                struct iwl_mvm_sar_profile *prof;

                /* don't allow SAR to be disabled (profile 0 means disable) */
                if (profs[i] == 0)
                        return -EPERM;

                /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
                if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
                        return -EINVAL;

                /* profiles go from 1 to 4, so decrement to access the array */
                prof = &mvm->sar_profiles[profs[i] - 1];

                /* if the profile is disabled, do nothing */
                if (!prof->enabled) {
                        IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
                                        profs[i]);
                        /* if one of the profiles is disabled, we fail all */
                        return -ENOENT;
                }

                IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
                for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
                        idx = (i * IWL_NUM_SUB_BANDS) + j;
                        cmd.v3.per_chain_restriction[i][j] =
                                cpu_to_le16(prof->table[idx]);
                        IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
                                        j, prof->table[idx]);
                }
        }

        IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

        return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
        struct iwl_geo_tx_power_profiles_resp *resp;
        int ret;

        struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
                .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
        };
        struct iwl_host_cmd cmd = {
                .id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
                .len = { sizeof(geo_cmd), },
                .flags = CMD_WANT_SKB,
                .data = { &geo_cmd },
        };

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
                return ret;
        }

        resp = (void *)cmd.resp_pkt->data;
        ret = le32_to_cpu(resp->profile_idx);
        if (WARN_ON(ret > IWL_NUM_GEO_PROFILES)) {
                /* log the bad index before overwriting it with the error */
                IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
                ret = -EIO;
        }

        iwl_free_resp(&cmd);
        return ret;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_cmd cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};
	int ret, i, j;
	u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

	ret = iwl_mvm_sar_get_wgds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

	BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
		     ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);

	for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
		struct iwl_per_chain_offset *chain =
			(struct iwl_per_chain_offset *)&cmd.table[i];

		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
			u8 *value;

			value = &mvm->geo_profiles[i].values[j *
				IWL_GEO_PER_CHAIN_SIZE];
			chain[j].max_tx_power = cpu_to_le16(value[0]);
			chain[j].chain_a = value[1];
			chain[j].chain_b = value[2];
			IWL_DEBUG_RADIO(mvm,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j, value[1], value[2], value[0]);
		}
	}
	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

#else /* CONFIG_ACPI */
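/*
 * Without ACPI there is no BIOS to read the SAR tables from, so the
 * table getters report -ENOENT and geo init becomes a no-op.
 */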
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}
#endif /* CONFIG_ACPI */

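/*
 * Read the SAR tables from the BIOS and apply the default limits.
 * WRDS is required for the feature, EWRD is optional; if WRDS is
 * missing we leave SAR disabled rather than failing the init flow.
 */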
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_sar_get_wrds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* if not available, don't fail and don't bother with EWRD */
		return 0;
	}

	ret = iwl_mvm_sar_get_ewrd_table(mvm);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

	/* choose profile 1 (WRDS) as default for both chains */
	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

	/* if the default profile isn't enabled in the BIOS, just skip SAR */
	if (ret == -ENOENT)
		return 0;

	return ret;
}

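/*
 * Bring up the runtime firmware.  Devices with the new TX API run a
 * single unified image; older devices first run the INIT image, then
 * restart the transport and load the regular runtime image.
 */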
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		return ret;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	return iwl_mvm_init_paging(mvm);
}

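/*
 * Full device bring-up: start the hardware, load the runtime firmware
 * and send the initial configuration (debug, antennas, PHY DB, BT
 * coex, RSS, stations, PHY contexts, thermal, LTR, power, regulatory,
 * scan and SAR).  Must be called with mvm->mutex held.
 */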
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	/* TODO - remove a000 disablement when we have RXQ config API */
	if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_geo_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	if (!iwlmvm_mod_params.init_dbg)
		iwl_mvm_stop_device(mvm);
	return ret;
}

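/*
 * Load the WoWLAN firmware image used in D3 (suspend): a trimmed-down
 * version of the iwl_mvm_up() flow that only configures what is needed
 * while the host is asleep.
 */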
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

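/*
 * Handle CARD_STATE_NOTIFICATION: log the HW and SW rfkill states and
 * whether the CT-kill temperature threshold has been reached.
 */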
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

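/*
 * Handle the MFUART load notification: log the installed and external
 * MFUART versions, load status and duration, and the image size when
 * the firmware is new enough to include it in the notification.
 */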
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}