drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 - 2019        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019       Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <net/mac80211.h>
65 #include <linux/netdevice.h>
66
67 #include "iwl-trans.h"
68 #include "iwl-op-mode.h"
69 #include "fw/img.h"
70 #include "iwl-debug.h"
71 #include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
72 #include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
73 #include "iwl-prph.h"
74 #include "fw/acpi.h"
75
76 #include "mvm.h"
77 #include "fw/dbg.h"
78 #include "iwl-phy-db.h"
79 #include "iwl-modparams.h"
80 #include "iwl-nvm-parse.h"
81
82 #define MVM_UCODE_ALIVE_TIMEOUT HZ
83 #define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
84
85 #define UCODE_VALID_OK  cpu_to_le32(0x1)
86
87 struct iwl_mvm_alive_data {
88         bool valid;
89         u32 scd_base_addr;
90 };
91
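/*
 * Tell the firmware which TX antennas/chains it is allowed to use.
 */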
92 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
93 {
94         struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
95                 .valid = cpu_to_le32(valid_tx_ant),
96         };
97
98         IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
99         return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
100                                     sizeof(tx_ant_cmd), &tx_ant_cmd);
101 }
102
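/*
 * Configure RSS for multi-queue RX: hash TCP, UDP and other payload for
 * both IPv4 and IPv6, and spread traffic over queues 1..N-1, keeping
 * queue 0 as the fallback queue.  A no-op when the transport has only a
 * single RX queue.
 */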
103 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
104 {
105         int i;
106         struct iwl_rss_config_cmd cmd = {
107                 .flags = cpu_to_le32(IWL_RSS_ENABLE),
108                 .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
109                              BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
110                              BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
111                              BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
112                              BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
113                              BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
114         };
115
116         if (mvm->trans->num_rx_queues == 1)
117                 return 0;
118
119         /* Do not direct RSS traffic to Q 0 which is our fallback queue */
120         for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
121                 cmd.indirection_table[i] =
122                         1 + (i % (mvm->trans->num_rx_queues - 1));
123         netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
124
125         return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
126 }
127
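/*
 * Send RFH_QUEUE_CONFIG_CMD with the DMA addresses of every RX queue
 * except the default queue, which is configured via the context info.
 */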
128 static int iwl_configure_rxq(struct iwl_mvm *mvm)
129 {
130         int i, num_queues, size, ret;
131         struct iwl_rfh_queue_config *cmd;
132         struct iwl_host_cmd hcmd = {
133                 .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
134                 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
135         };
136
137         /* Do not configure default queue, it is configured via context info */
138         num_queues = mvm->trans->num_rx_queues - 1;
139
140         size = struct_size(cmd, data, num_queues);
141
142         cmd = kzalloc(size, GFP_KERNEL);
143         if (!cmd)
144                 return -ENOMEM;
145
146         cmd->num_queues = num_queues;
147
148         for (i = 0; i < num_queues; i++) {
149                 struct iwl_trans_rxq_dma_data data;
150
151                 cmd->data[i].q_num = i + 1;
152                 iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
153
154                 cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
155                 cmd->data[i].urbd_stts_wrptr =
156                         cpu_to_le64(data.urbd_stts_wrptr);
157                 cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
158                 cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
159         }
160
161         hcmd.data[0] = cmd;
162         hcmd.len[0] = size;
163
164         ret = iwl_mvm_send_cmd(mvm, &hcmd);
165
166         kfree(cmd);
167
168         return ret;
169 }
170
171 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
172 {
173         struct iwl_dqa_enable_cmd dqa_cmd = {
174                 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
175         };
176         u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
177         int ret;
178
179         ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
180         if (ret)
181                 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
182         else
183                 IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
184
185         return ret;
186 }
187
188 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
189                                    struct iwl_rx_cmd_buffer *rxb)
190 {
191         struct iwl_rx_packet *pkt = rxb_addr(rxb);
192         struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
193         __le32 *dump_data = mfu_dump_notif->data;
194         int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
195         int i;
196
197         if (mfu_dump_notif->index_num == 0)
198                 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
199                          le32_to_cpu(mfu_dump_notif->assert_id));
200
201         for (i = 0; i < n_words; i++)
202                 IWL_DEBUG_INFO(mvm,
203                                "MFUART assert dump, dword %u: 0x%08x\n",
204                                le16_to_cpu(mfu_dump_notif->index_num) *
205                                n_words + i,
206                                le32_to_cpu(dump_data[i]));
207 }
208
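/*
 * Notification-wait callback for the ALIVE notification.  Handles both
 * the v3 (single LMAC) and the CDB (two LMAC) response layouts, records
 * the LMAC/UMAC error event table pointers and the SCD base address,
 * and reports whether the firmware came up with IWL_ALIVE_STATUS_OK.
 */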
209 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
210                          struct iwl_rx_packet *pkt, void *data)
211 {
212         struct iwl_mvm *mvm =
213                 container_of(notif_wait, struct iwl_mvm, notif_wait);
214         struct iwl_mvm_alive_data *alive_data = data;
215         struct mvm_alive_resp_v3 *palive3;
216         struct mvm_alive_resp *palive;
217         struct iwl_umac_alive *umac;
218         struct iwl_lmac_alive *lmac1;
219         struct iwl_lmac_alive *lmac2 = NULL;
220         u16 status;
221         u32 lmac_error_event_table, umac_error_event_table;
222
223         if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
224                 palive = (void *)pkt->data;
225                 umac = &palive->umac_data;
226                 lmac1 = &palive->lmac_data[0];
227                 lmac2 = &palive->lmac_data[1];
228                 status = le16_to_cpu(palive->status);
229         } else {
230                 palive3 = (void *)pkt->data;
231                 umac = &palive3->umac_data;
232                 lmac1 = &palive3->lmac_data;
233                 status = le16_to_cpu(palive3->status);
234         }
235
236         lmac_error_event_table =
237                 le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
238         iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
239
240         if (lmac2)
241                 mvm->trans->dbg.lmac_error_event_table[1] =
242                         le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
243
244         umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
245
246         if (!umac_error_event_table) {
247                 mvm->support_umac_log = false;
248         } else if (umac_error_event_table >=
249                    mvm->trans->cfg->min_umac_error_event_table) {
250                 mvm->support_umac_log = true;
251         } else {
252                 IWL_ERR(mvm,
253                         "Not valid error log pointer 0x%08X for %s uCode\n",
254                         umac_error_event_table,
255                         (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
256                         "Init" : "RT");
257                 mvm->support_umac_log = false;
258         }
259
260         if (mvm->support_umac_log)
261                 iwl_fw_umac_set_alive_err_table(mvm->trans,
262                                                 umac_error_event_table);
263
264         alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
265         alive_data->valid = status == IWL_ALIVE_STATUS_OK;
266
267         IWL_DEBUG_FW(mvm,
268                      "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
269                      status, lmac1->ver_type, lmac1->ver_subtype);
270
271         if (lmac2)
272                 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
273
274         IWL_DEBUG_FW(mvm,
275                      "UMAC version: Major - 0x%x, Minor - 0x%x\n",
276                      le32_to_cpu(umac->umac_major),
277                      le32_to_cpu(umac->umac_minor));
278
279         iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
280
281         return true;
282 }
283
284 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
285                                    struct iwl_rx_packet *pkt, void *data)
286 {
287         WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
288
289         return true;
290 }
291
292 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
293                                   struct iwl_rx_packet *pkt, void *data)
294 {
295         struct iwl_phy_db *phy_db = data;
296
297         if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
298                 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
299                 return true;
300         }
301
302         WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
303
304         return false;
305 }
306
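/*
 * Load the requested ucode image (or the usniffer image when a usniffer
 * debug configuration requests it), start it, and wait up to
 * MVM_UCODE_ALIVE_TIMEOUT for the ALIVE notification.  On failure the
 * previously running image type is restored in the firmware runtime.
 */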
307 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
308                                          enum iwl_ucode_type ucode_type)
309 {
310         struct iwl_notification_wait alive_wait;
311         struct iwl_mvm_alive_data alive_data = {};
312         const struct fw_img *fw;
313         int ret;
314         enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
315         static const u16 alive_cmd[] = { MVM_ALIVE };
316         bool run_in_rfkill =
317                 ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
318
319         if (ucode_type == IWL_UCODE_REGULAR &&
320             iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
321             !(fw_has_capa(&mvm->fw->ucode_capa,
322                           IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
323                 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
324         else
325                 fw = iwl_get_ucode_image(mvm->fw, ucode_type);
326         if (WARN_ON(!fw))
327                 return -EINVAL;
328         iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
329         clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
330
331         iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
332                                    alive_cmd, ARRAY_SIZE(alive_cmd),
333                                    iwl_alive_fn, &alive_data);
334
335         /*
336          * We want to load the INIT firmware even in RFKILL.
337          * For the unified firmware case, the ucode_type is not
338          * INIT, but we still need to run it.
339          */
340         ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
341         if (ret) {
342                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
343                 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
344                 return ret;
345         }
346
347         /*
348          * Some things may run in the background now, but we
349          * just wait for the ALIVE notification here.
350          */
351         ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
352                                     MVM_UCODE_ALIVE_TIMEOUT);
353         if (ret) {
354                 struct iwl_trans *trans = mvm->trans;
355
356                 if (ret == -ETIMEDOUT)
357                         iwl_fw_dbg_error_collect(&mvm->fwrt,
358                                                  FW_DBG_TRIGGER_ALIVE_TIMEOUT);
359
360                 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
361                         IWL_ERR(mvm,
362                                 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
363                                 iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
364                                 iwl_read_umac_prph(trans,
365                                                    UMAG_SB_CPU_2_STATUS));
366                 else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
367                         IWL_ERR(mvm,
368                                 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
369                                 iwl_read_prph(trans, SB_CPU_1_STATUS),
370                                 iwl_read_prph(trans, SB_CPU_2_STATUS));
371                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
372                 return ret;
373         }
374
375         if (!alive_data.valid) {
376                 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
377                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
378                 return -EIO;
379         }
380
381         iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
382
383         /*
384          * Note: all the queues are enabled as part of the interface
385          * initialization, but in firmware restart scenarios they
386          * could be stopped, so wake them up. In firmware restart,
387          * mac80211 will have the queues stopped as well until the
388          * reconfiguration completes. During normal startup, they
389          * will be empty.
390          */
391
392         memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
393         /*
394          * Set a 'fake' TID for the command queue, since we use the
395          * hweight() of the tid_bitmap as a refcount now. Not that
396          * we ever even consider the command queue as one we might
397          * want to reuse, but be safe nevertheless.
398          */
399         mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
400                 BIT(IWL_MAX_TID_COUNT + 2);
401
402         set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
403 #ifdef CONFIG_IWLWIFI_DEBUGFS
404         iwl_fw_set_dbg_rec_on(&mvm->fwrt);
405 #endif
406
407         return 0;
408 }
409
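/*
 * Bring up a unified (single image) firmware: start the regular image,
 * run the NVM access flow (optionally loading an external NVM file),
 * and wait for INIT_COMPLETE_NOTIF.
 */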
410 static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
411 {
412         struct iwl_notification_wait init_wait;
413         struct iwl_nvm_access_complete_cmd nvm_complete = {};
414         struct iwl_init_extended_cfg_cmd init_cfg = {
415                 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
416         };
417         static const u16 init_complete[] = {
418                 INIT_COMPLETE_NOTIF,
419         };
420         int ret;
421
422         lockdep_assert_held(&mvm->mutex);
423
424         mvm->rfkill_safe_init_done = false;
425
426         iwl_init_notification_wait(&mvm->notif_wait,
427                                    &init_wait,
428                                    init_complete,
429                                    ARRAY_SIZE(init_complete),
430                                    iwl_wait_init_complete,
431                                    NULL);
432
433         iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
434
435         /* Will also start the device */
436         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
437         if (ret) {
438                 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
439                 goto error;
440         }
441         iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
442
443         /* Send init config command to mark that we are sending NVM access
444          * commands
445          */
446         ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
447                                                 INIT_EXTENDED_CFG_CMD),
448                                    CMD_SEND_IN_RFKILL,
449                                    sizeof(init_cfg), &init_cfg);
450         if (ret) {
451                 IWL_ERR(mvm, "Failed to run init config command: %d\n",
452                         ret);
453                 goto error;
454         }
455
456         /* Load NVM to NIC if needed */
457         if (mvm->nvm_file_name) {
458                 iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
459                                       mvm->nvm_sections);
460                 iwl_mvm_load_nvm_to_nic(mvm);
461         }
462
463         if (IWL_MVM_PARSE_NVM && read_nvm) {
464                 ret = iwl_nvm_init(mvm);
465                 if (ret) {
466                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
467                         goto error;
468                 }
469         }
470
471         ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
472                                                 NVM_ACCESS_COMPLETE),
473                                    CMD_SEND_IN_RFKILL,
474                                    sizeof(nvm_complete), &nvm_complete);
475         if (ret) {
476                 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
477                         ret);
478                 goto error;
479         }
480
481         /* We wait for the INIT complete notification */
482         ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
483                                     MVM_UCODE_ALIVE_TIMEOUT);
484         if (ret)
485                 return ret;
486
487         /* Read the NVM only at driver load time, no need to do this twice */
488         if (!IWL_MVM_PARSE_NVM && read_nvm) {
489                 mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
490                 if (IS_ERR(mvm->nvm_data)) {
491                         ret = PTR_ERR(mvm->nvm_data);
492                         mvm->nvm_data = NULL;
493                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
494                         return ret;
495                 }
496         }
497
498         mvm->rfkill_safe_init_done = true;
499
500         return 0;
501
502 error:
503         iwl_remove_notification(&mvm->notif_wait, &init_wait);
504         return ret;
505 }
506
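/*
 * Send the PHY configuration and the calibration triggers that match
 * the currently running ucode image.
 */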
507 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
508 {
509         struct iwl_phy_cfg_cmd phy_cfg_cmd;
510         enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
511
512         /* Set parameters */
513         phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
514
515         /* set extra PHY configuration flags from the device's cfg */
516         phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
517
518         phy_cfg_cmd.calib_control.event_trigger =
519                 mvm->fw->default_calib[ucode_type].event_trigger;
520         phy_cfg_cmd.calib_control.flow_trigger =
521                 mvm->fw->default_calib[ucode_type].flow_trigger;
522
523         IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
524                        phy_cfg_cmd.phy_cfg);
525
526         return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
527                                     sizeof(phy_cfg_cmd), &phy_cfg_cmd);
528 }
529
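/*
 * Run the INIT ucode image: read the NVM, send the TX antenna and PHY
 * configuration, and collect the calibration results into the PHY DB
 * until INIT_COMPLETE_NOTIF arrives.  Unified firmware is handled by
 * iwl_run_unified_mvm_ucode() instead.
 */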
530 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
531 {
532         struct iwl_notification_wait calib_wait;
533         static const u16 init_complete[] = {
534                 INIT_COMPLETE_NOTIF,
535                 CALIB_RES_NOTIF_PHY_DB
536         };
537         int ret;
538
539         if (iwl_mvm_has_unified_ucode(mvm))
540                 return iwl_run_unified_mvm_ucode(mvm, true);
541
542         lockdep_assert_held(&mvm->mutex);
543
544         mvm->rfkill_safe_init_done = false;
545
546         iwl_init_notification_wait(&mvm->notif_wait,
547                                    &calib_wait,
548                                    init_complete,
549                                    ARRAY_SIZE(init_complete),
550                                    iwl_wait_phy_db_entry,
551                                    mvm->phy_db);
552
553         /* Will also start the device */
554         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
555         if (ret) {
556                 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
557                 goto remove_notif;
558         }
559
560         if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
561                 ret = iwl_mvm_send_bt_init_conf(mvm);
562                 if (ret)
563                         goto remove_notif;
564         }
565
566         /* Read the NVM only at driver load time, no need to do this twice */
567         if (read_nvm) {
568                 ret = iwl_nvm_init(mvm);
569                 if (ret) {
570                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
571                         goto remove_notif;
572                 }
573         }
574
575         /* In case we read the NVM from external file, load it to the NIC */
576         if (mvm->nvm_file_name)
577                 iwl_mvm_load_nvm_to_nic(mvm);
578
579         WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
580                   "Too old NVM version (0x%0x, required = 0x%0x)",
581                   mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
582
583         /*
584          * abort after reading the NVM in case RF Kill is on; we will complete
585          * the init sequence later, when RF kill is switched off
586          */
587         if (iwl_mvm_is_radio_hw_killed(mvm)) {
588                 IWL_DEBUG_RF_KILL(mvm,
589                                   "jump over all phy activities due to RF kill\n");
590                 goto remove_notif;
591         }
592
593         mvm->rfkill_safe_init_done = true;
594
595         /* Send TX valid antennas before triggering calibrations */
596         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
597         if (ret)
598                 goto remove_notif;
599
600         ret = iwl_send_phy_cfg_cmd(mvm);
601         if (ret) {
602                 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
603                         ret);
604                 goto remove_notif;
605         }
606
607         /*
608          * Some things may run in the background now, but we
609          * just wait for the calibration complete notification.
610          */
611         ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
612                                     MVM_UCODE_CALIB_TIMEOUT);
613         if (!ret)
614                 goto out;
615
616         if (iwl_mvm_is_radio_hw_killed(mvm)) {
617                 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
618                 ret = 0;
619         } else {
620                 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
621                         ret);
622         }
623
624         goto out;
625
626 remove_notif:
627         iwl_remove_notification(&mvm->notif_wait, &calib_wait);
628 out:
629         mvm->rfkill_safe_init_done = false;
630         if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
631                 /* we want to debug INIT and we have no NVM - fake */
632                 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
633                                         sizeof(struct ieee80211_channel) +
634                                         sizeof(struct ieee80211_rate),
635                                         GFP_KERNEL);
636                 if (!mvm->nvm_data)
637                         return -ENOMEM;
638                 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
639                 mvm->nvm_data->bands[0].n_channels = 1;
640                 mvm->nvm_data->bands[0].n_bitrates = 1;
641                 mvm->nvm_data->bands[0].bitrates =
642                         (void *)mvm->nvm_data->channels + 1;
643                 mvm->nvm_data->bands[0].bitrates->hw_value = 10;
644         }
645
646         return ret;
647 }
648
649 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
650 {
651         struct iwl_ltr_config_cmd cmd = {
652                 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
653         };
654
655         if (!mvm->trans->ltr_enabled)
656                 return 0;
657
658         return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
659                                     sizeof(cmd), &cmd);
660 }
661
662 #ifdef CONFIG_ACPI
663 static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
664                                           union acpi_object *table,
665                                           struct iwl_mvm_sar_profile *profile,
666                                           bool enabled)
667 {
668         int i;
669
670         profile->enabled = enabled;
671
672         for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
673                 if ((table[i].type != ACPI_TYPE_INTEGER) ||
674                     (table[i].integer.value > U8_MAX))
675                         return -EINVAL;
676
677                 profile->table[i] = table[i].integer.value;
678         }
679
680         return 0;
681 }
682
683 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
684 {
685         union acpi_object *wifi_pkg, *table, *data;
686         bool enabled;
687         int ret, tbl_rev;
688
689         data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
690         if (IS_ERR(data))
691                 return PTR_ERR(data);
692
693         wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
694                                          ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
695         if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
696                 ret = PTR_ERR(wifi_pkg);
697                 goto out_free;
698         }
699
700         if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
701                 ret = -EINVAL;
702                 goto out_free;
703         }
704
705         enabled = !!(wifi_pkg->package.elements[1].integer.value);
706
707         /* position of the actual table */
708         table = &wifi_pkg->package.elements[2];
709
710         /* The profile from WRDS is officially profile 1, but goes
711          * into sar_profiles[0] (because we don't have a profile 0).
712          */
713         ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
714                                       enabled);
715 out_free:
716         kfree(data);
717         return ret;
718 }
719
720 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
721 {
722         union acpi_object *wifi_pkg, *data;
723         bool enabled;
724         int i, n_profiles, ret, tbl_rev;
725
726         data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
727         if (IS_ERR(data))
728                 return PTR_ERR(data);
729
730         wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
731                                          ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
732         if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
733                 ret = PTR_ERR(wifi_pkg);
734                 goto out_free;
735         }
736
737         if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
738             (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
739                 ret = -EINVAL;
740                 goto out_free;
741         }
742
743         enabled = !!(wifi_pkg->package.elements[1].integer.value);
744         n_profiles = wifi_pkg->package.elements[2].integer.value;
745
746         /*
747          * Check the validity of n_profiles.  The EWRD profiles start
748          * from index 1, so the maximum value allowed here is
749          * ACPI_SAR_PROFILE_NUM - 1.
750          */
751         if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
752                 ret = -EINVAL;
753                 goto out_free;
754         }
755
756         for (i = 0; i < n_profiles; i++) {
757                 /* each table is ACPI_SAR_TABLE_SIZE entries, starting at element 3 */
758                 int pos = 3 + i * ACPI_SAR_TABLE_SIZE;
759
760                 /* The EWRD profiles officially go from 2 to 4, but we
761                  * save them in sar_profiles[1-3] (because we don't
762                  * have profile 0).  So in the array we start from 1.
763                  */
764                 ret = iwl_mvm_sar_set_profile(mvm,
765                                               &wifi_pkg->package.elements[pos],
766                                               &mvm->sar_profiles[i + 1],
767                                               enabled);
768                 if (ret < 0)
769                         break;
773         }
774
775 out_free:
776         kfree(data);
777         return ret;
778 }
779
780 static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
781 {
782         union acpi_object *wifi_pkg, *data;
783         int i, j, ret, tbl_rev;
784         int idx = 1;
785
786         data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
787         if (IS_ERR(data))
788                 return PTR_ERR(data);
789
790         wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
791                                          ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
792         if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
793                 ret = PTR_ERR(wifi_pkg);
794                 goto out_free;
795         }
796
797         mvm->geo_rev = tbl_rev;
798         for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
799                 for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
800                         union acpi_object *entry;
801
802                         entry = &wifi_pkg->package.elements[idx++];
803                         if ((entry->type != ACPI_TYPE_INTEGER) ||
804                             (entry->integer.value > U8_MAX)) {
805                                 ret = -EINVAL;
806                                 goto out_free;
807                         }
808
809                         mvm->geo_profiles[i].values[j] = entry->integer.value;
810                 }
811         }
812         ret = 0;
813 out_free:
814         kfree(data);
815         return ret;
816 }
817
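/*
 * Send REDUCE_TX_POWER_CMD with the per-chain, per-sub-band SAR limits
 * taken from the (1-based) BIOS profiles prof_a and prof_b.  Fails if
 * either profile is 0 (disable) or not enabled in the ACPI tables.
 */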
818 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
819 {
820         union {
821                 struct iwl_dev_tx_power_cmd v5;
822                 struct iwl_dev_tx_power_cmd_v4 v4;
823         } cmd;
824         int i, j, idx;
825         int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
826         int len;
827
828         BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
829         BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
830                      ACPI_SAR_TABLE_SIZE);
831
832         cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
833
834         if (fw_has_api(&mvm->fw->ucode_capa,
835                        IWL_UCODE_TLV_API_REDUCE_TX_POWER))
836                 len = sizeof(cmd.v5);
837         else if (fw_has_capa(&mvm->fw->ucode_capa,
838                              IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
839                 len = sizeof(cmd.v4);
840         else
841                 len = sizeof(cmd.v4.v3);
842
843         for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
844                 struct iwl_mvm_sar_profile *prof;
845
846                 /* don't allow SAR to be disabled (profile 0 means disable) */
847                 if (profs[i] == 0)
848                         return -EPERM;
849
850                 /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
851                 if (profs[i] > ACPI_SAR_PROFILE_NUM)
852                         return -EINVAL;
853
854                 /* profiles go from 1 to 4, so decrement to access the array */
855                 prof = &mvm->sar_profiles[profs[i] - 1];
856
857                 /* if the profile is disabled, do nothing */
858                 if (!prof->enabled) {
859                         IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
860                                         profs[i]);
861                         /* if one of the profiles is disabled, we fail all */
862                         return -ENOENT;
863                 }
864
865                 IWL_DEBUG_INFO(mvm,
866                                "SAR EWRD: chain %d profile index %d\n",
867                                i, profs[i]);
868                 IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
869                 for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
870                         idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
871                         cmd.v5.v3.per_chain_restriction[i][j] =
872                                 cpu_to_le16(prof->table[idx]);
873                         IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
874                                         j, prof->table[idx]);
875                 }
876         }
877
878         IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
879
880         return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
881 }
882
883 static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
884 {
885         /*
886          * The GEO_TX_POWER_LIMIT command is not supported on earlier
887          * firmware versions.  Unfortunately, we don't have a TLV API
888          * flag to rely on, so rely on the major version which is in
889          * the first byte of ucode_ver.  This was implemented
890          * initially on version 38 and then backported to 36, 29 and
891          * 17.
892          */
893         return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
894                IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
895                IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
896                IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
897 }
898
899 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
900 {
901         struct iwl_geo_tx_power_profiles_resp *resp;
902         int ret;
903         u16 len;
904         void *data;
905         struct iwl_geo_tx_power_profiles_cmd geo_cmd;
906         struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
907         struct iwl_host_cmd cmd;
908
909         if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
910                 geo_cmd.ops =
911                         cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
912                 len = sizeof(geo_cmd);
913                 data = &geo_cmd;
914         } else {
915                 geo_cmd_v1.ops =
916                         cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
917                 len = sizeof(geo_cmd_v1);
918                 data = &geo_cmd_v1;
919         }
920
921         cmd = (struct iwl_host_cmd){
922                 .id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
923                 .len = { len, },
924                 .flags = CMD_WANT_SKB,
925                 .data = { data },
926         };
927
928         if (!iwl_mvm_sar_geo_support(mvm))
929                 return -EOPNOTSUPP;
930
931         ret = iwl_mvm_send_cmd(mvm, &cmd);
932         if (ret) {
933                 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
934                 return ret;
935         }
936
937         resp = (void *)cmd.resp_pkt->data;
938         ret = le32_to_cpu(resp->profile_idx);
939         if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
940                 IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
941                 ret = -EIO;
942         }
943
944         iwl_free_resp(&cmd);
945         return ret;
946 }
947
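/*
 * If the firmware supports GEO_TX_POWER_LIMIT, read the ACPI WGDS table
 * and send the per-profile, per-band geographic TX power offsets.  A
 * missing or invalid table is not treated as an error.
 */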
948 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
949 {
950         struct iwl_geo_tx_power_profiles_cmd cmd = {
951                 .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
952         };
953         int ret, i, j;
954         u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
955
956         if (!iwl_mvm_sar_geo_support(mvm))
957                 return 0;
958
959         ret = iwl_mvm_sar_get_wgds_table(mvm);
960         if (ret < 0) {
961                 IWL_DEBUG_RADIO(mvm,
962                                 "Geo SAR BIOS table invalid or unavailable. (%d)\n",
963                                 ret);
964                 /* we don't fail if the table is not available */
965                 return 0;
966         }
967
968         IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
969
970         BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
971                      ACPI_WGDS_TABLE_SIZE + 1 !=  ACPI_WGDS_WIFI_DATA_SIZE);
972
973         BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
974
975         for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
976                 struct iwl_per_chain_offset *chain =
977                         (struct iwl_per_chain_offset *)&cmd.table[i];
978
979                 for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
980                         u8 *value;
981
982                         value = &mvm->geo_profiles[i].values[j *
983                                 ACPI_GEO_PER_CHAIN_SIZE];
984                         chain[j].max_tx_power = cpu_to_le16(value[0]);
985                         chain[j].chain_a = value[1];
986                         chain[j].chain_b = value[2];
987                         IWL_DEBUG_RADIO(mvm,
988                                         "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
989                                         i, j, value[1], value[2], value[0]);
990                 }
991         }
992
993         cmd.table_revision = cpu_to_le32(mvm->geo_rev);
994
995         if (!fw_has_api(&mvm->fw->ucode_capa,
996                        IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
997                 return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
998                                 sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
999                                 &cmd);
1000         }
1001
1002         return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
1003 }
1004
1005 #else /* CONFIG_ACPI */
1006 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
1007 {
1008         return -ENOENT;
1009 }
1010
1011 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
1012 {
1013         return -ENOENT;
1014 }
1015
1016 static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
1017 {
1018         return -ENOENT;
1019 }
1020
1021 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
1022 {
1023         return 0;
1024 }
1025
1026 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
1027                                int prof_b)
1028 {
1029         return -ENOENT;
1030 }
1031
1032 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
1033 {
1034         return -ENOENT;
1035 }
1036 #endif /* CONFIG_ACPI */
1037
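/*
 * Send FW_ERROR_RECOVERY_CMD.  With ERROR_RECOVERY_UPDATE_DB the error
 * recovery buffer captured before the HW reset is passed back to the
 * firmware and then freed.
 */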
1038 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
1039 {
1040         u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
1041         int ret;
1042         u32 resp;
1043
1044         struct iwl_fw_error_recovery_cmd recovery_cmd = {
1045                 .flags = cpu_to_le32(flags),
1046                 .buf_size = 0,
1047         };
1048         struct iwl_host_cmd host_cmd = {
1049                 .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
1050                 .flags = CMD_WANT_SKB,
1051                 .data = {&recovery_cmd, },
1052                 .len = {sizeof(recovery_cmd), },
1053         };
1054
1055         /* no error log was defined in TLV */
1056         if (!error_log_size)
1057                 return;
1058
1059         if (flags & ERROR_RECOVERY_UPDATE_DB) {
1060                 /* no buffer was allocated during the HW reset */
1061                 if (!mvm->error_recovery_buf)
1062                         return;
1063
1064                 host_cmd.data[1] = mvm->error_recovery_buf;
1065                 host_cmd.len[1] =  error_log_size;
1066                 host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
1067                 recovery_cmd.buf_size = cpu_to_le32(error_log_size);
1068         }
1069
1070         ret = iwl_mvm_send_cmd(mvm, &host_cmd);
1071         kfree(mvm->error_recovery_buf);
1072         mvm->error_recovery_buf = NULL;
1073
1074         if (ret) {
1075                 IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
1076                 return;
1077         }
1078
1079         /* the SKB response is only relevant for ERROR_RECOVERY_UPDATE_DB */
1080         if (flags & ERROR_RECOVERY_UPDATE_DB) {
1081                 resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
1082                 if (resp)
1083                         IWL_ERR(mvm,
1084                                 "Failed to send recovery cmd blob was invalid %d\n",
1085                                 resp);
1086         }
1087 }
1088
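/*
 * Read the WRDS and EWRD SAR tables from ACPI and apply profile 1 to
 * both chains.  Returns 1 (not a failure) when the BIOS doesn't provide
 * usable profiles, in which case SAR geo must not be enabled either.
 */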
1089 static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
1090 {
1091         int ret;
1092
1093         ret = iwl_mvm_sar_get_wrds_table(mvm);
1094         if (ret < 0) {
1095                 IWL_DEBUG_RADIO(mvm,
1096                                 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
1097                                 ret);
1098                 /*
1099                  * If not available, don't fail and don't bother with EWRD.
1100                  * Return 1 to tell that we can't use WGDS either.
1101                  */
1102                 return 1;
1103         }
1104
1105         ret = iwl_mvm_sar_get_ewrd_table(mvm);
1106         /* if EWRD is not available, we can still use WRDS, so don't fail */
1107         if (ret < 0)
1108                 IWL_DEBUG_RADIO(mvm,
1109                                 "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
1110                                 ret);
1111
1112         /* choose profile 1 (WRDS) as default for both chains */
1113         ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
1114
1115         /*
1116          * If we don't have profile 0 from BIOS, just skip it.  This
1117          * means that SAR Geo will not be enabled either, even if we
1118          * have other valid profiles.
1119          */
1120         if (ret == -ENOENT)
1121                 return 1;
1122
1123         return ret;
1124 }
1125
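/*
 * Start the runtime firmware.  For non-unified images this first runs
 * the INIT image, stops and restarts the transport without entering low
 * power mode (preserving MFUART state), and then loads the regular
 * image and its paging data.
 */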
1126 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
1127 {
1128         int ret;
1129
1130         if (iwl_mvm_has_unified_ucode(mvm))
1131                 return iwl_run_unified_mvm_ucode(mvm, false);
1132
1133         ret = iwl_run_init_mvm_ucode(mvm, false);
1134
1135         if (ret) {
1136                 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
1137
1138                 if (iwlmvm_mod_params.init_dbg)
1139                         return 0;
1140                 return ret;
1141         }
1142
1143         /*
1144          * Stop and start the transport without entering low power
1145          * mode. This will save the state of other components on the
1146          * device that are triggered by the INIT firmware (MFUART).
1147          */
1148         _iwl_trans_stop_device(mvm->trans, false);
1149         ret = _iwl_trans_start_hw(mvm->trans, false);
1150         if (ret)
1151                 return ret;
1152
1153         iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
1154
1155         mvm->rfkill_safe_init_done = false;
1156         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
1157         if (ret)
1158                 return ret;
1159
1160         mvm->rfkill_safe_init_done = true;
1161
1162         iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
1163
1164         return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
1165 }
1166
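/*
 * Full runtime bring-up: start the hardware and the RT firmware, then
 * configure antennas, PHY DB, BT coex, RX/RSS queues, stations, PHY
 * contexts, thermal, LTR, power, MCC, scan and SAR before handing the
 * device over to mac80211.
 */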
1167 int iwl_mvm_up(struct iwl_mvm *mvm)
1168 {
1169         int ret, i;
1170         struct ieee80211_channel *chan;
1171         struct cfg80211_chan_def chandef;
1172
1173         lockdep_assert_held(&mvm->mutex);
1174
1175         ret = iwl_trans_start_hw(mvm->trans);
1176         if (ret)
1177                 return ret;
1178
1179         ret = iwl_mvm_load_rt_fw(mvm);
1180         if (ret) {
1181                 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
1182                 if (ret != -ERFKILL)
1183                         iwl_fw_dbg_error_collect(&mvm->fwrt,
1184                                                  FW_DBG_TRIGGER_DRIVER);
1185                 goto error;
1186         }
1187
1188         iwl_get_shared_mem_conf(&mvm->fwrt);
1189
1190         ret = iwl_mvm_sf_update(mvm, NULL, false);
1191         if (ret)
1192                 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1193
1194         if (!mvm->trans->dbg.ini_valid) {
1195                 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1196                 /* if we have a destination, assume EARLY START */
1197                 if (mvm->fw->dbg.dest_tlv)
1198                         mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
1199                 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
1200         }
1201
1202         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1203         if (ret)
1204                 goto error;
1205
1206         if (!iwl_mvm_has_unified_ucode(mvm)) {
1207                 /* Send phy db control command and then phy db calibration */
1208                 ret = iwl_send_phy_db_data(mvm->phy_db);
1209                 if (ret)
1210                         goto error;
1211
1212                 ret = iwl_send_phy_cfg_cmd(mvm);
1213                 if (ret)
1214                         goto error;
1215         }
1216
1217         ret = iwl_mvm_send_bt_init_conf(mvm);
1218         if (ret)
1219                 goto error;
1220
1221         /* Init RSS configuration */
1222         if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
1223                 ret = iwl_configure_rxq(mvm);
1224                 if (ret) {
1225                         IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
1226                                 ret);
1227                         goto error;
1228                 }
1229         }
1230
1231         if (iwl_mvm_has_new_rx_api(mvm)) {
1232                 ret = iwl_send_rss_cfg_cmd(mvm);
1233                 if (ret) {
1234                         IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1235                                 ret);
1236                         goto error;
1237                 }
1238         }
1239
1240         /* init the fw <-> mac80211 STA mapping */
1241         for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
1242                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1243
1244         mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1245
1246         /* reset quota debouncing buffer - 0xff will yield invalid data */
1247         memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1248
1249         ret = iwl_mvm_send_dqa_cmd(mvm);
1250         if (ret)
1251                 goto error;
1252
1253         /* Add auxiliary station for scanning */
1254         ret = iwl_mvm_add_aux_sta(mvm);
1255         if (ret)
1256                 goto error;
1257
1258         /* Add all the PHY contexts */
1259         chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
1260         cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1261         for (i = 0; i < NUM_PHY_CTX; i++) {
1262                 /*
1263                  * The channel used here isn't relevant as it's
1264                  * going to be overwritten in the other flows.
1265                  * For now use the first channel we have.
1266                  */
1267                 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1268                                            &chandef, 1, 1);
1269                 if (ret)
1270                         goto error;
1271         }
1272
1273 #ifdef CONFIG_THERMAL
1274         if (iwl_mvm_is_tt_in_fw(mvm)) {
1275                 /* in order to give the responsibility of ct-kill and
1276                  * TX backoff to FW we need to send an empty temperature
1277                  * reporting cmd during init time
1278                  */
1279                 iwl_mvm_send_temp_report_ths_cmd(mvm);
1280         } else {
1281                 /* Initialize tx backoffs to the minimal possible */
1282                 iwl_mvm_tt_tx_backoff(mvm, 0);
1283         }
1284
1285         /* TODO: read the budget from BIOS / Platform NVM */
1286
1287         /*
1288          * In case there is no budget from BIOS / Platform NVM the default
1289          * budget should be 2000mW (cooling state 0).
1290          */
1291         if (iwl_mvm_is_ctdp_supported(mvm)) {
1292                 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1293                                            mvm->cooling_dev.cur_state);
1294                 if (ret)
1295                         goto error;
1296         }
1297 #else
1298         /* Initialize tx backoffs to the minimal possible */
1299         iwl_mvm_tt_tx_backoff(mvm, 0);
1300 #endif
1301
1302         WARN_ON(iwl_mvm_config_ltr(mvm));
1303
1304         ret = iwl_mvm_power_update_device(mvm);
1305         if (ret)
1306                 goto error;
1307
1308         /*
1309          * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1310          * anyway, so don't init MCC.
1311          */
1312         if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1313                 ret = iwl_mvm_init_mcc(mvm);
1314                 if (ret)
1315                         goto error;
1316         }
1317
1318         if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1319                 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
1320                 mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
1321                 ret = iwl_mvm_config_scan(mvm);
1322                 if (ret)
1323                         goto error;
1324         }
1325
1326         /* allow FW/transport low power modes if not during restart */
1327         if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1328                 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1329
1330         if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1331                 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
1332
1333         if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
1334                 IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
1335
1336         ret = iwl_mvm_sar_init(mvm);
1337         if (ret == 0) {
1338                 ret = iwl_mvm_sar_geo_init(mvm);
1339         } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
1340                 /*
1341                  * If basic SAR is not available, we check for WGDS,
1342                  * which should *not* be available either.  If it is
1343                  * available, issue an error, because we can't use SAR
1344                  * Geo without basic SAR.
1345                  */
1346                 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
1347         }
1348
1349         if (ret < 0)
1350                 goto error;
1351
1352         iwl_mvm_leds_sync(mvm);
1353
1354         IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1355         return 0;
1356  error:
1357         if (!iwlmvm_mod_params.init_dbg || !ret)
1358                 iwl_mvm_stop_device(mvm);
1359         return ret;
1360 }
1361
1362 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1363 {
1364         int ret, i;
1365
1366         lockdep_assert_held(&mvm->mutex);
1367
1368         ret = iwl_trans_start_hw(mvm->trans);
1369         if (ret)
1370                 return ret;
1371
1372         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1373         if (ret) {
1374                 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1375                 goto error;
1376         }
1377
1378         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1379         if (ret)
1380                 goto error;
1381
1382         /* Send phy db control command and then phy db calibration */
1383         ret = iwl_send_phy_db_data(mvm->phy_db);
1384         if (ret)
1385                 goto error;
1386
1387         ret = iwl_send_phy_cfg_cmd(mvm);
1388         if (ret)
1389                 goto error;
1390
1391         /* init the fw <-> mac80211 STA mapping */
1392         for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
1393                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1394
1395         /* Add auxiliary station for scanning */
1396         ret = iwl_mvm_add_aux_sta(mvm);
1397         if (ret)
1398                 goto error;
1399
1400         return 0;
1401  error:
1402         iwl_mvm_stop_device(mvm);
1403         return ret;
1404 }
1405
1406 void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1407                                  struct iwl_rx_cmd_buffer *rxb)
1408 {
1409         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1410         struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1411         u32 flags = le32_to_cpu(card_state_notif->flags);
1412
1413         IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1414                           (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1415                           (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1416                           (flags & CT_KILL_CARD_DISABLED) ?
1417                           "Reached" : "Not reached");
1418 }
1419
1420 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1421                              struct iwl_rx_cmd_buffer *rxb)
1422 {
1423         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1424         struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1425
1426         IWL_DEBUG_INFO(mvm,
1427                        "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1428                        le32_to_cpu(mfuart_notif->installed_ver),
1429                        le32_to_cpu(mfuart_notif->external_ver),
1430                        le32_to_cpu(mfuart_notif->status),
1431                        le32_to_cpu(mfuart_notif->duration));
1432
1433         if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
1434                 IWL_DEBUG_INFO(mvm,
1435                                "MFUART: image size: 0x%08x\n",
1436                                le32_to_cpu(mfuart_notif->image_size));
1437 }