1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/delay.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/etherdevice.h>
43 #include "qed.h"
44 #include "qed_dcbx.h"
45 #include "qed_hsi.h"
46 #include "qed_hw.h"
47 #include "qed_mcp.h"
48 #include "qed_reg_addr.h"
49 #include "qed_sriov.h"
51 #define QED_MCP_RESP_ITER_US 10
53 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
54 #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
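/* Polling budget arithmetic: each retry below waits QED_MCP_RESP_ITER_US =
 * 10 usec, so QED_DRV_MB_MAX_RETRIES gives 500 * 1000 * 10 usec = 5 sec and
 * QED_MCP_RESET_RETRIES gives 50 * 1000 * 10 usec = 500 msec, matching the
 * comments above.
 */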
56 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
57 	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
58 	       _val)
60 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
61 qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
63 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
64 	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
65 offsetof(struct public_drv_mb, _field), _val)
67 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
68 DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
69 offsetof(struct public_drv_mb, _field))
71 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
72 DRV_ID_PDA_COMP_VER_SHIFT)
74 #define MCP_BYTES_PER_MBIT_SHIFT 17
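/* A shift of 17 approximates the number of bytes in one Mbit: the exact
 * value is 1,000,000 / 8 = 125,000 bytes and 2^17 = 131,072, which lets
 * bandwidth conversions use a shift instead of a division.
 */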
76 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
77 {
78 	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
79 		return false;
80 	return true;
81 }
83 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
84 {
85 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
86 					PUBLIC_PORT);
87 	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
89 	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
90 						   MFW_PORT(p_hwfn));
91 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
92 		   "port_addr = 0x%x, port_id 0x%02x\n",
93 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
94 }
96 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
97 {
98 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
99 	u32 tmp, i;
101 	if (!p_hwfn->mcp_info->public_base)
102 		return;
104 	for (i = 0; i < length; i++) {
105 		tmp = qed_rd(p_hwfn, p_ptt,
106 			     p_hwfn->mcp_info->mfw_mb_addr +
107 			     (i << 2) + sizeof(u32));
109 		/* The MB data is actually BE; it needs to be forced to CPU order */
110 		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
111 			be32_to_cpu((__force __be32)tmp);
112 	}
113 }
115 struct qed_mcp_cmd_elem {
116 struct list_head list;
117 struct qed_mcp_mb_params *p_mb_params;
118 	u16 expected_seq_num;
119 	bool b_is_completed;
120 };
122 /* Must be called while cmd_lock is acquired */
123 static struct qed_mcp_cmd_elem *
124 qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
125 struct qed_mcp_mb_params *p_mb_params,
126 		     u16 expected_seq_num)
127 {
128 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
130 	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
131 	if (!p_cmd_elem)
132 		goto out;
134 	p_cmd_elem->p_mb_params = p_mb_params;
135 	p_cmd_elem->expected_seq_num = expected_seq_num;
136 	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
137 out:
138 	return p_cmd_elem;
139 }
141 /* Must be called while cmd_lock is acquired */
142 static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
143 				 struct qed_mcp_cmd_elem *p_cmd_elem)
144 {
145 	list_del(&p_cmd_elem->list);
146 	kfree(p_cmd_elem);
147 }
149 /* Must be called while cmd_lock is acquired */
150 static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
151 						     u16 seq_num)
152 {
153 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
155 	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
156 		if (p_cmd_elem->expected_seq_num == seq_num)
157 			return p_cmd_elem;
158 	}
160 	return NULL;
161 }
163 int qed_mcp_free(struct qed_hwfn *p_hwfn)
164 {
165 	if (p_hwfn->mcp_info) {
166 		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
168 		kfree(p_hwfn->mcp_info->mfw_mb_cur);
169 		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
171 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
172 		list_for_each_entry_safe(p_cmd_elem,
173 					 p_tmp,
174 					 &p_hwfn->mcp_info->cmd_list, list) {
175 			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
176 		}
177 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
178 	}
180 	kfree(p_hwfn->mcp_info);
181 	p_hwfn->mcp_info = NULL;
183 	return 0;
184 }
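/* Illustrative lifecycle sketch (not code from this file): mcp_info is set
 * up once per HW function and torn down symmetrically, e.g.:
 *
 *	if (qed_mcp_cmd_init(p_hwfn, p_ptt))
 *		goto err;
 *	...issue mailbox commands via qed_mcp_cmd() and friends...
 *	qed_mcp_free(p_hwfn);
 *
 * Any command element still linked on cmd_list at teardown is released
 * under cmd_lock by the loop above.
 */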
186 /* Maximum of 1 sec to wait for the SHMEM ready indication */
187 #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
188 #define QED_MCP_SHMEM_RDY_ITER_MS 50
190 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
191 {
192 struct qed_mcp_info *p_info = p_hwfn->mcp_info;
193 u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
194 u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
195 u32 drv_mb_offsize, mfw_mb_offsize;
196 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
198 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
199 	if (!p_info->public_base) {
200 		DP_NOTICE(p_hwfn,
201 			  "The address of the MCP scratch-pad is not configured\n");
202 		return -EINVAL;
203 	}
205 	p_info->public_base |= GRCBASE_MCP;
207 /* Get the MFW MB address and number of supported messages */
208 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
209 				SECTION_OFFSIZE_ADDR(p_info->public_base,
210 						     PUBLIC_MFW_MB));
211 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
213 p_info->mfw_mb_addr +
214 					    offsetof(struct public_mfw_mb,
215 						     sup_msgs));
217 /* The driver can notify that there was an MCP reset, and might read the
218 * SHMEM values before the MFW has completed initializing them.
219 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
220 	 * data ready indication.
221 	 */
222 	while (!p_info->mfw_mb_length && --cnt) {
223 		msleep(msec);
224 p_info->mfw_mb_length =
225 (u16)qed_rd(p_hwfn, p_ptt,
226 p_info->mfw_mb_addr +
227 				    offsetof(struct public_mfw_mb, sup_msgs));
228 	}
230 	if (!cnt) {
231 		DP_NOTICE(p_hwfn,
232 			  "Failed to get the SHMEM ready notification after %d msec\n",
233 			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
234 		return -EBUSY;
235 	}
237 /* Calculate the driver and MFW mailbox address */
238 drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
239 				SECTION_OFFSIZE_ADDR(p_info->public_base,
240 						     PUBLIC_DRV_MB));
241 p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
242 DP_VERBOSE(p_hwfn, QED_MSG_SP,
243 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
244 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
246 	/* Get the current driver mailbox sequence before sending
247 	 * the first command
248 	 */
249 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
250 DRV_MSG_SEQ_NUMBER_MASK;
252 /* Get current FW pulse sequence */
253 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
254 				DRV_PULSE_SEQ_MASK;
256 	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
258 	return 0;
259 }
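/* Layout note: each "offsize" word read above packs both the offset and the
 * size of a SHMEM section; SECTION_ADDR() resolves such a word plus an index
 * (PF id or port id) into an absolute address. This is why every section is
 * fetched as an offsize first and only then dereferenced per function or
 * per port.
 */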
261 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
262 {
263 	struct qed_mcp_info *p_info;
264 	u32 size;
266 	/* Allocate mcp_info structure */
267 	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
268 	if (!p_hwfn->mcp_info)
269 		return -ENOMEM;
270 p_info = p_hwfn->mcp_info;
272 /* Initialize the MFW spinlock */
273 spin_lock_init(&p_info->cmd_lock);
274 spin_lock_init(&p_info->link_lock);
276 INIT_LIST_HEAD(&p_info->cmd_list);
278 if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
279 DP_NOTICE(p_hwfn, "MCP is not initialized\n");
280 		/* Do not free mcp_info here, since public_base indicates that
281 		 * the MCP is not initialized
282 		 */
283 		return -EINVAL;
284 	}
286 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
287 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
288 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
289 	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
290 		goto err;
292 	return 0;
294 err:
295 	qed_mcp_free(p_hwfn);
296 	return -ENOMEM;
297 }
299 static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
300 				   struct qed_ptt *p_ptt)
301 {
302 	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
304 	/* Use MCP history register to check if MCP reset occurred between init
305 	 * time and now.
306 	 */
307 	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
308 		DP_VERBOSE(p_hwfn,
309 			   QED_MSG_SP,
310 			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
311 			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
313 		qed_load_mcp_offsets(p_hwfn, p_ptt);
314 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
315 	}
316 }
318 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
319 {
320 	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
321 	int rc = 0;
323 	if (p_hwfn->mcp_info->b_block_cmd) {
324 		DP_NOTICE(p_hwfn,
325 			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
326 		return -EBUSY;
327 	}
329 /* Ensure that only a single thread is accessing the mailbox */
330 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
332 org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
334 /* Set drv command along with the updated sequence */
335 qed_mcp_reread_offsets(p_hwfn, p_ptt);
336 seq = ++p_hwfn->mcp_info->drv_mb_seq;
337 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
339 	do {
340 		/* Wait for MFW response */
341 		udelay(delay);
342 		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
343 	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
344 					      MISCS_REG_GENERIC_POR_0)) &&
345 		 (cnt++ < QED_MCP_RESET_RETRIES));
347 if (org_mcp_reset_seq !=
348 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
349 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
350 			   "MCP was reset after %d usec\n", cnt * delay);
351 	} else {
352 		DP_ERR(p_hwfn, "Failed to reset MCP\n");
353 		rc = -EAGAIN;
354 	}
356 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
358 	return rc;
359 }
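/* Illustrative usage sketch (hypothetical caller): a reset attempt is
 * bounded by QED_MCP_RESET_RETRIES * QED_MCP_RESP_ITER_US = 500 msec:
 *
 *	if (qed_mcp_reset(p_hwfn, p_ptt))
 *		DP_NOTICE(p_hwfn, "MCP reset failed or timed out\n");
 *
 * Success is detected by MISCS_REG_GENERIC_POR_0 changing its value, since
 * that register reflects the MCP reset history.
 */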
361 /* Must be called while cmd_lock is acquired */
362 static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
363 {
364 	struct qed_mcp_cmd_elem *p_cmd_elem;
366 	/* There is at most one pending command at a certain time, and if it
367 	 * exists - it is placed at the HEAD of the list.
368 	 */
369 	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
370 		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
371 					      struct qed_mcp_cmd_elem, list);
372 		return !p_cmd_elem->b_is_completed;
373 	}
375 	return false;
376 }
378 /* Must be called while cmd_lock is acquired */
379 static int
380 qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
381 {
382 	struct qed_mcp_mb_params *p_mb_params;
383 	struct qed_mcp_cmd_elem *p_cmd_elem;
384 	u32 mcp_resp;
385 	u16 seq_num;
387 	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
388 	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
390 /* Return if no new non-handled response has been received */
391 	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
392 		return -EAGAIN;
394 	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
395 	if (!p_cmd_elem) {
396 		DP_ERR(p_hwfn,
397 		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
398 		       seq_num);
399 		return -EINVAL;
400 	}
402 p_mb_params = p_cmd_elem->p_mb_params;
404 /* Get the MFW response along with the sequence number */
405 p_mb_params->mcp_resp = mcp_resp;
407 /* Get the MFW param */
408 p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
410 /* Get the union data */
411 if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
412 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
413 				      offsetof(struct public_drv_mb,
414 					       union_data);
415 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
416 union_data_addr, p_mb_params->data_dst_size);
419 	p_cmd_elem->b_is_completed = true;
421 	return 0;
422 }
424 /* Must be called while cmd_lock is acquired */
425 static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
426 struct qed_ptt *p_ptt,
427 				    struct qed_mcp_mb_params *p_mb_params,
428 				    u16 seq_num)
429 {
430 	union drv_union_data union_data;
431 	u32 union_data_addr;
433 /* Set the union data */
434 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
435 offsetof(struct public_drv_mb, union_data);
436 memset(&union_data, 0, sizeof(union_data));
437 if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
438 memcpy(&union_data, p_mb_params->p_data_src,
439 p_mb_params->data_src_size);
440 	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
441 		      sizeof(union_data));
443 /* Set the drv param */
444 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
446 /* Set the drv command along with the sequence number */
447 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
449 DP_VERBOSE(p_hwfn, QED_MSG_SP,
450 "MFW mailbox: command 0x%08x param 0x%08x\n",
451 		   (p_mb_params->cmd | seq_num), p_mb_params->param);
452 }
454 static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
455 {
456 	p_hwfn->mcp_info->b_block_cmd = block_cmd;
458 	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
459 		block_cmd ? "Block" : "Unblock");
460 }
462 static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
463 				    struct qed_ptt *p_ptt)
464 {
465 	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
466 	u32 delay = QED_MCP_RESP_ITER_US;
468 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
469 	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
470 	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
471 	udelay(delay);
472 	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
473 	udelay(delay);
474 	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
476 	DP_NOTICE(p_hwfn,
477 		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
478 		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
479 }
481 static int
482 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
483 struct qed_ptt *p_ptt,
484 struct qed_mcp_mb_params *p_mb_params,
485 		       u32 max_retries, u32 usecs)
486 {
487 	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
488 	struct qed_mcp_cmd_elem *p_cmd_elem;
489 	u16 seq_num;
490 	int rc = 0;
492 	/* Wait until the mailbox is non-occupied */
493 	do {
494 /* Exit the loop if there is no pending command, or if the
495 * pending command is completed during this iteration.
496 		 * The spinlock stays locked until the command is sent.
497 		 */
499 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
501 		if (!qed_mcp_has_pending_cmd(p_hwfn))
502 			break;
504 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
505 		if (!rc)
506 			break;
507 		else if (rc != -EAGAIN)
508 			goto err;
510 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
512 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
513 			msleep(msecs);
514 		else
515 			udelay(usecs);
516 	} while (++cnt < max_retries);
518 	if (cnt >= max_retries) {
519 		DP_ERR(p_hwfn,
520 		       "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
521 		       p_mb_params->cmd, p_mb_params->param);
522 		rc = -EAGAIN;
523 		goto err;
524 	}
525 /* Send the mailbox command */
526 qed_mcp_reread_offsets(p_hwfn, p_ptt);
527 seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
528 	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
529 	if (!p_cmd_elem) {
530 		rc = -ENOMEM;
531 		goto err;
532 	}
534 __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
535 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
537 	/* Wait for the MFW response */
538 	do {
539 /* Exit the loop if the command is already completed, or if the
540 * command is completed during this iteration.
541 		 * The spinlock stays locked until the list element is removed.
542 		 */
544 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
545 			msleep(msecs);
546 		else
547 			udelay(usecs);
549 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
551 		if (p_cmd_elem->b_is_completed)
552 			break;
554 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
555 		if (!rc)
556 			break;
557 		else if (rc != -EAGAIN)
558 			goto err;
560 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
561 } while (++cnt < max_retries);
563 	if (cnt >= max_retries) {
564 		DP_ERR(p_hwfn,
565 		       "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
566 		       p_mb_params->cmd, p_mb_params->param);
567 qed_mcp_print_cpu_info(p_hwfn, p_ptt);
569 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
570 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
571 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
573 		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
574 			qed_mcp_cmd_set_blocking(p_hwfn, true);
576 		rc = -EAGAIN;
577 		goto err;
578 	}
579 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
580 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
582 	DP_VERBOSE(p_hwfn,
583 		   QED_MSG_SP,
584 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
585 p_mb_params->mcp_resp,
586 p_mb_params->mcp_param,
587 (cnt * usecs) / 1000, (cnt * usecs) % 1000);
589 /* Clear the sequence number from the MFW response */
590 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
592 	return 0;
594 err:
595 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
596 	return rc;
597 }
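/* To summarize the flow above: a sender first polls until no other command
 * occupies the mailbox, then, under cmd_lock, allocates a list element with
 * a fresh sequence number and writes the command, and finally polls until
 * qed_mcp_update_pending_cmd() sees a matching sequence in fw_mb_header.
 * On a final timeout the MFW is marked non-responsive via
 * qed_mcp_cmd_set_blocking(), unless QED_MB_FLAG_AVOID_BLOCK was set.
 */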
599 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
600 struct qed_ptt *p_ptt,
601 				 struct qed_mcp_mb_params *p_mb_params)
602 {
603 size_t union_data_size = sizeof(union drv_union_data);
604 u32 max_retries = QED_DRV_MB_MAX_RETRIES;
605 u32 usecs = QED_MCP_RESP_ITER_US;
607 /* MCP not initialized */
608 	if (!qed_mcp_is_init(p_hwfn)) {
609 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
610 		return -EBUSY;
611 	}
613 	if (p_hwfn->mcp_info->b_block_cmd) {
614 		DP_NOTICE(p_hwfn,
615 			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
616 			  p_mb_params->cmd, p_mb_params->param);
617 		return -EBUSY;
618 	}
620 	if (p_mb_params->data_src_size > union_data_size ||
621 	    p_mb_params->data_dst_size > union_data_size) {
622 		DP_ERR(p_hwfn,
623 		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
624 		       p_mb_params->data_src_size,
625 		       p_mb_params->data_dst_size, union_data_size);
626 		return -EINVAL;
627 	}
629 	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
630 		max_retries = DIV_ROUND_UP(max_retries, 1000);
631 		usecs *= 1000;
632 	}
634 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
635 				      usecs);
636 }
638 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
639 		struct qed_ptt *p_ptt,
640 		u32 cmd,
641 		u32 param,
642 		u32 *o_mcp_resp,
643 		u32 *o_mcp_param)
644 {
645 	struct qed_mcp_mb_params mb_params;
646 	int rc;
648 	memset(&mb_params, 0, sizeof(mb_params));
649 	mb_params.cmd = cmd;
650 	mb_params.param = param;
652 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
653 	if (rc)
654 		return rc;
656 	*o_mcp_resp = mb_params.mcp_resp;
657 	*o_mcp_param = mb_params.mcp_param;
659 	return 0;
660 }
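/* Minimal caller sketch (illustrative only; DRV_MSG_CODE_NIG_DRAIN is just
 * an example of a command that takes a plain parameter):
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			 &resp, &param);
 *	if (rc || resp != FW_MSG_CODE_OK)
 *		return rc ? rc : -EINVAL;
 *
 * Callers need to check both rc (transport failure) and resp (MFW verdict).
 */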
662 static int
663 qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
664 		   struct qed_ptt *p_ptt,
665 		   u32 cmd,
666 		   u32 param,
667 		   u32 *o_mcp_resp,
668 		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
669 {
670 	struct qed_mcp_mb_params mb_params;
671 	int rc;
673 	memset(&mb_params, 0, sizeof(mb_params));
674 	mb_params.cmd = cmd;
675 	mb_params.param = param;
676 	mb_params.p_data_src = i_buf;
677 	mb_params.data_src_size = (u8)i_txn_size;
678 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
679 	if (rc)
680 		return rc;
682 	*o_mcp_resp = mb_params.mcp_resp;
683 	*o_mcp_param = mb_params.mcp_param;
685 	/* nvm_info needs to be updated */
686 	p_hwfn->nvm_info.valid = false;
688 	return 0;
689 }
691 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
692 		       struct qed_ptt *p_ptt,
693 		       u32 cmd,
694 		       u32 param,
695 		       u32 *o_mcp_resp,
696 		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
697 {
698 	struct qed_mcp_mb_params mb_params;
699 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
700 	int rc;
702 	memset(&mb_params, 0, sizeof(mb_params));
703 	mb_params.cmd = cmd;
704 	mb_params.param = param;
705 	mb_params.p_data_dst = raw_data;
707 	/* Use the maximal value since the actual one is part of the response */
708 	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
710 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
711 	if (rc)
712 		return rc;
714 	*o_mcp_resp = mb_params.mcp_resp;
715 	*o_mcp_param = mb_params.mcp_param;
717 	*o_txn_size = *o_mcp_param;
718 	memcpy(o_buf, raw_data, *o_txn_size);
720 	return 0;
721 }
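/* NVM transactions are bounded by MCP_DRV_NVM_BUF_LEN bytes each, so a
 * caller reading a larger image is expected to loop, advancing its NVM
 * offset by *o_txn_size after every successful call.
 */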
723 static bool
724 qed_mcp_can_force_load(u8 drv_role,
725 		       u8 exist_drv_role,
726 		       enum qed_override_force_load override_force_load)
727 {
728 	bool can_force_load = false;
730 	switch (override_force_load) {
731 	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
732 		can_force_load = true;
733 		break;
734 	case QED_OVERRIDE_FORCE_LOAD_NEVER:
735 		can_force_load = false;
736 		break;
737 	default:
738 		can_force_load = (drv_role == DRV_ROLE_OS &&
739 				  exist_drv_role == DRV_ROLE_PREBOOT) ||
740 				 (drv_role == DRV_ROLE_KDUMP &&
741 				  exist_drv_role == DRV_ROLE_OS);
742 		break;
743 	}
745 	return can_force_load;
746 }
748 static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
749 				   struct qed_ptt *p_ptt)
750 {
751 	u32 resp = 0, param = 0;
752 	int rc;
754 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
755 			 &resp, &param);
756 	if (rc)
757 		DP_NOTICE(p_hwfn,
758 			  "Failed to send cancel load request, rc = %d\n", rc);
760 	return rc;
761 }
763 #define CONFIG_QEDE_BITMAP_IDX BIT(0)
764 #define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
765 #define CONFIG_QEDR_BITMAP_IDX BIT(2)
766 #define CONFIG_QEDF_BITMAP_IDX BIT(4)
767 #define CONFIG_QEDI_BITMAP_IDX BIT(5)
768 #define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
770 static u32 qed_get_config_bitmap(void)
771 {
772 	u32 config_bitmap = 0x0;
774 if (IS_ENABLED(CONFIG_QEDE))
775 config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
777 if (IS_ENABLED(CONFIG_QED_SRIOV))
778 config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
780 if (IS_ENABLED(CONFIG_QED_RDMA))
781 config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
783 if (IS_ENABLED(CONFIG_QED_FCOE))
784 config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
786 if (IS_ENABLED(CONFIG_QED_ISCSI))
787 config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
789 if (IS_ENABLED(CONFIG_QED_LL2))
790 config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
792 	return config_bitmap;
793 }
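/* Worked example (assuming a config with only qede, SR-IOV and LL2
 * enabled): the function returns BIT(0) | BIT(1) | BIT(6) = 0x43. The
 * bitmap is reported to the MFW as drv_ver_1 in the load request below.
 */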
795 struct qed_load_req_in_params {
796 	u32 hsi_ver;
797 #define QED_LOAD_REQ_HSI_VER_DEFAULT	0
798 #define QED_LOAD_REQ_HSI_VER_1		1
799 	u32 drv_ver_0;
800 	u32 drv_ver_1;
801 	u32 fw_ver;
802 	u8 drv_role;
803 	u8 timeout_val;
804 	u8 force_cmd;
805 	bool avoid_eng_reset;
806 };
808 struct qed_load_req_out_params {
809 	u32 load_code;
810 	u32 exist_drv_ver_0;
811 	u32 exist_drv_ver_1;
812 	u32 exist_fw_ver;
813 	u8 exist_drv_role;
814 	u8 mfw_hsi_ver;
815 	bool drv_exists;
816 };
818 static int
819 __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
820 		   struct qed_ptt *p_ptt,
821 		   struct qed_load_req_in_params *p_in_params,
822 		   struct qed_load_req_out_params *p_out_params)
823 {
824 	struct qed_mcp_mb_params mb_params;
825 	struct load_req_stc load_req;
826 	struct load_rsp_stc load_rsp;
827 	u32 hsi_ver;
828 	int rc;
830 memset(&load_req, 0, sizeof(load_req));
831 load_req.drv_ver_0 = p_in_params->drv_ver_0;
832 load_req.drv_ver_1 = p_in_params->drv_ver_1;
833 load_req.fw_ver = p_in_params->fw_ver;
834 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
835 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
836 p_in_params->timeout_val);
837 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
838 p_in_params->force_cmd);
839 QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
840 p_in_params->avoid_eng_reset);
842 hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
843 DRV_ID_MCP_HSI_VER_CURRENT :
844 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
846 memset(&mb_params, 0, sizeof(mb_params));
847 mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
848 mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
849 mb_params.p_data_src = &load_req;
850 mb_params.data_src_size = sizeof(load_req);
851 mb_params.p_data_dst = &load_rsp;
852 mb_params.data_dst_size = sizeof(load_rsp);
853 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
855 DP_VERBOSE(p_hwfn, QED_MSG_SP,
856 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
858 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
859 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
860 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
861 QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
863 if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
864 DP_VERBOSE(p_hwfn, QED_MSG_SP,
865 "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
870 QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
871 QED_MFW_GET_FIELD(load_req.misc0,
873 QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
874 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
875 	}
877 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
878 	if (rc) {
879 		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
880 		return rc;
881 	}
883 DP_VERBOSE(p_hwfn, QED_MSG_SP,
884 "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
885 p_out_params->load_code = mb_params.mcp_resp;
887 if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
888 	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
889 		DP_VERBOSE(p_hwfn,
890 			   QED_MSG_SP,
891 			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
892 			   load_rsp.drv_ver_0,
893 			   load_rsp.drv_ver_1,
894 			   load_rsp.fw_ver,
895 			   load_rsp.misc0,
896 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
897 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
898 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
900 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
901 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
902 p_out_params->exist_fw_ver = load_rsp.fw_ver;
903 p_out_params->exist_drv_role =
904 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
905 p_out_params->mfw_hsi_ver =
906 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
907 p_out_params->drv_exists =
908 QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
909 			LOAD_RSP_FLAGS0_DRV_EXISTS;
910 	}
912 	return 0;
913 }
915 static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
916 				enum qed_drv_role drv_role,
917 				u8 *p_mfw_drv_role)
918 {
919 	switch (drv_role) {
920 	case QED_DRV_ROLE_OS:
921 		*p_mfw_drv_role = DRV_ROLE_OS;
922 		break;
923 	case QED_DRV_ROLE_KDUMP:
924 		*p_mfw_drv_role = DRV_ROLE_KDUMP;
925 		break;
926 	default:
927 		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
928 		return -EINVAL;
929 	}
931 	return 0;
932 }
934 enum qed_load_req_force {
935 QED_LOAD_REQ_FORCE_NONE,
936 QED_LOAD_REQ_FORCE_PF,
937 	QED_LOAD_REQ_FORCE_ALL,
938 };
940 static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
942 				  enum qed_load_req_force force_cmd,
943 				  u8 *p_mfw_force_cmd)
944 {
945 	switch (force_cmd) {
946 	case QED_LOAD_REQ_FORCE_NONE:
947 		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
948 		break;
949 	case QED_LOAD_REQ_FORCE_PF:
950 		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
951 		break;
952 	case QED_LOAD_REQ_FORCE_ALL:
953 		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
954 		break;
955 	}
956 }
958 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
959 struct qed_ptt *p_ptt,
960 		     struct qed_load_req_params *p_params)
961 {
962 	struct qed_load_req_out_params out_params;
963 	struct qed_load_req_in_params in_params;
964 	u8 mfw_drv_role, mfw_force_cmd;
965 	int rc;
967 memset(&in_params, 0, sizeof(in_params));
968 in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
969 in_params.drv_ver_0 = QED_VERSION;
970 in_params.drv_ver_1 = qed_get_config_bitmap();
971 in_params.fw_ver = STORM_FW_VERSION;
972 	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
973 	if (rc)
974 		return rc;
976 in_params.drv_role = mfw_drv_role;
977 in_params.timeout_val = p_params->timeout_val;
978 qed_get_mfw_force_cmd(p_hwfn,
979 QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
981 in_params.force_cmd = mfw_force_cmd;
982 in_params.avoid_eng_reset = p_params->avoid_eng_reset;
984 memset(&out_params, 0, sizeof(out_params));
985 	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
986 	if (rc)
987 		return rc;
989 /* First handle cases where another load request should/might be sent:
990 * - MFW expects the old interface [HSI version = 1]
991 * - MFW responds that a force load request is required
993 	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
994 		DP_INFO(p_hwfn,
995 			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
997 		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
998 memset(&out_params, 0, sizeof(out_params));
999 		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1000 		if (rc)
1001 			return rc;
1002 } else if (out_params.load_code ==
1003 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1004 if (qed_mcp_can_force_load(in_params.drv_role,
1005 out_params.exist_drv_role,
1006 					   p_params->override_force_load)) {
1007 			DP_INFO(p_hwfn,
1008 				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1009 in_params.drv_role, in_params.fw_ver,
1010 in_params.drv_ver_0, in_params.drv_ver_1,
1011 out_params.exist_drv_role,
1012 out_params.exist_fw_ver,
1013 out_params.exist_drv_ver_0,
1014 out_params.exist_drv_ver_1);
1016 qed_get_mfw_force_cmd(p_hwfn,
1017 					      QED_LOAD_REQ_FORCE_ALL,
1018 					      &mfw_force_cmd);
1020 in_params.force_cmd = mfw_force_cmd;
1021 memset(&out_params, 0, sizeof(out_params));
1022 			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1023 						&out_params);
1024 			if (rc)
1025 				return rc;
1026 		} else {
1027 			DP_NOTICE(p_hwfn,
1028 				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1029 in_params.drv_role, in_params.fw_ver,
1030 in_params.drv_ver_0, in_params.drv_ver_1,
1031 out_params.exist_drv_role,
1032 out_params.exist_fw_ver,
1033 out_params.exist_drv_ver_0,
1034 out_params.exist_drv_ver_1);
1036 "Avoid sending a force load request to prevent disruption of active PFs\n");
1038 qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1043 /* Now handle the other types of responses.
1044 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1045 	 * expected here after the additional revised load requests were sent.
1046 	 */
1047 switch (out_params.load_code) {
1048 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1049 case FW_MSG_CODE_DRV_LOAD_PORT:
1050 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1051 if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1052 out_params.drv_exists) {
1053 /* The role and fw/driver version match, but the PF is
1054 			 * already loaded and has not been unloaded gracefully.
1055 			 */
1056 			DP_NOTICE(p_hwfn,
1057 				  "PF is already loaded\n");
1058 			return -EINVAL;
1059 		}
1060 		break;
1061 	default:
1062 		DP_NOTICE(p_hwfn,
1063 			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1064 			  out_params.load_code);
1065 		return -EBUSY;
1066 	}
1068 	p_params->load_code = out_params.load_code;
1070 	return 0;
1071 }
1073 int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1074 {
1075 	u32 resp = 0, param = 0;
1076 	int rc;
1078 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1079 			 &param);
1080 	if (rc) {
1081 		DP_NOTICE(p_hwfn,
1082 			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1083 		return rc;
1084 	}
1086 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1087 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1088 		DP_NOTICE(p_hwfn,
1089 			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1091 	return 0;
1092 }
1094 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1095 {
1096 	struct qed_mcp_mb_params mb_params;
1097 	u32 wol_param;
1099 	switch (p_hwfn->cdev->wol_config) {
1100 	case QED_OV_WOL_DISABLED:
1101 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1102 		break;
1103 	case QED_OV_WOL_ENABLED:
1104 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1105 		break;
1106 	default:
1107 		DP_NOTICE(p_hwfn,
1108 			  "Unknown WoL configuration %02x\n",
1109 			  p_hwfn->cdev->wol_config);
1110 		fallthrough;
1111 	case QED_OV_WOL_DEFAULT:
1112 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1113 	}
1115 memset(&mb_params, 0, sizeof(mb_params));
1116 mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1117 mb_params.param = wol_param;
1118 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1120 return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1123 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1124 {
1125 	struct qed_mcp_mb_params mb_params;
1126 	struct mcp_mac wol_mac;
1128 memset(&mb_params, 0, sizeof(mb_params));
1129 mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1131 /* Set the primary MAC if WoL is enabled */
1132 if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1133 u8 *p_mac = p_hwfn->cdev->wol_mac;
1135 memset(&wol_mac, 0, sizeof(wol_mac));
1136 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1137 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1138 p_mac[4] << 8 | p_mac[5];
1141 (QED_MSG_SP | NETIF_MSG_IFDOWN),
1142 "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1143 p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1145 mb_params.p_data_src = &wol_mac;
1146 		mb_params.data_src_size = sizeof(wol_mac);
1147 	}
1149 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1150 }
1152 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1153 				  struct qed_ptt *p_ptt)
1154 {
1155 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1156 					PUBLIC_PATH);
1157 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1158 	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1159 				     QED_PATH_ID(p_hwfn));
1160 	u32 disabled_vfs[VF_MAX_STATIC / 32];
1161 	int i;
1163 	DP_VERBOSE(p_hwfn,
1164 		   (QED_MSG_SP | QED_MSG_IOV),
1165 		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1166 		   mfw_path_offsize, path_addr);
1168 for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1169 		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1170 					 path_addr +
1171 					 offsetof(struct public_path,
1172 						  mcp_vf_disabled) + sizeof(u32) * i);
1174 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1175 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1176 			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1177 	}
1179 	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1180 		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1181 }
1183 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1184 		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1185 {
1186 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1187 					PUBLIC_FUNC);
1188 	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1189 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1190 				     MCP_PF_ID(p_hwfn));
1191 	struct qed_mcp_mb_params mb_params;
1192 	int rc;
1193 	int i;
1195 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1196 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1197 "Acking VFs [%08x,...,%08x] - %08x\n",
1198 i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1200 memset(&mb_params, 0, sizeof(mb_params));
1201 mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1202 mb_params.p_data_src = vfs_to_ack;
1203 mb_params.data_src_size = VF_MAX_STATIC / 8;
1204 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1205 	if (rc) {
1206 		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1207 		return -EBUSY;
1208 	}
1210 /* Clear the ACK bits */
1211 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1212 		qed_wr(p_hwfn, p_ptt,
1213 		       func_addr +
1214 		       offsetof(struct public_func, drv_ack_vf_disabled) +
1215 		       i * sizeof(u32), 0);
1217 	return rc;
1218 }
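/* Sizing note: the VF bitmaps above carry VF_MAX_STATIC bits, i.e.
 * VF_MAX_STATIC / 32 dwords are read and cleared while VF_MAX_STATIC / 8
 * bytes are sent to the MFW; assuming VF_MAX_STATIC = 192 (the constant
 * lives in the HSI headers) that is 6 dwords / 24 bytes.
 */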
1220 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1221 					      struct qed_ptt *p_ptt)
1222 {
1223 	u32 transceiver_state;
1225 transceiver_state = qed_rd(p_hwfn, p_ptt,
1226 p_hwfn->mcp_info->port_addr +
1227 				   offsetof(struct public_port,
1228 					    transceiver_data));
1230 	DP_VERBOSE(p_hwfn,
1231 		   (NETIF_MSG_HW | QED_MSG_SP),
1232 		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1233 		   transceiver_state,
1234 		   (u32)(p_hwfn->mcp_info->port_addr +
1235 offsetof(struct public_port, transceiver_data)));
1237 transceiver_state = GET_FIELD(transceiver_state,
1238 ETH_TRANSCEIVER_STATE);
1240 if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1241 DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1243 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1246 static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1247 struct qed_ptt *p_ptt,
1248 				    struct qed_mcp_link_state *p_link)
1249 {
1250 u32 eee_status, val;
1252 p_link->eee_adv_caps = 0;
1253 p_link->eee_lp_adv_caps = 0;
1254 eee_status = qed_rd(p_hwfn,
1256 p_hwfn->mcp_info->port_addr +
1257 offsetof(struct public_port, eee_status));
1258 p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1259 val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1260 if (val & EEE_1G_ADV)
1261 p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1262 if (val & EEE_10G_ADV)
1263 p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1264 val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1265 if (val & EEE_1G_ADV)
1266 p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1267 if (val & EEE_10G_ADV)
1268 		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1269 }
1271 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1272 struct qed_ptt *p_ptt,
1273 				  struct public_func *p_data, int pfid)
1274 {
1275 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1276 					PUBLIC_FUNC);
1277 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1278 	u32 func_addr;
1279 	u32 i, size;
1281 	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1282 memset(p_data, 0, sizeof(*p_data));
1284 size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1285 for (i = 0; i < size / sizeof(u32); i++)
1286 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1287 					    func_addr + (i << 2));
1289 	return size;
1290 }
1291 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1292 				  struct public_func *p_shmem_info)
1293 {
1294 struct qed_mcp_function_info *p_info;
1296 p_info = &p_hwfn->mcp_info->func_info;
1298 p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1299 FUNC_MF_CFG_MIN_BW);
1300 	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1301 		DP_INFO(p_hwfn,
1302 			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1303 p_info->bandwidth_min);
1304 		p_info->bandwidth_min = 1;
1305 	}
1307 p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1308 FUNC_MF_CFG_MAX_BW);
1309 	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1310 		DP_INFO(p_hwfn,
1311 			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1312 p_info->bandwidth_max);
1313 		p_info->bandwidth_max = 100;
1314 	}
1315 }
1317 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1318 				       struct qed_ptt *p_ptt, bool b_reset)
1319 {
1320 	struct qed_mcp_link_state *p_link;
1321 	u8 max_bw, min_bw;
1322 	u32 status = 0;
1324 /* Prevent SW/attentions from doing this at the same time */
1325 spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1327 p_link = &p_hwfn->mcp_info->link_output;
1328 memset(p_link, 0, sizeof(*p_link));
1329 	if (!b_reset) {
1330 		status = qed_rd(p_hwfn, p_ptt,
1331 p_hwfn->mcp_info->port_addr +
1332 offsetof(struct public_port, link_status));
1333 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1334 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1336 (u32)(p_hwfn->mcp_info->port_addr +
1337 offsetof(struct public_port, link_status)));
1338 	} else {
1339 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1340 			   "Resetting link indications\n");
1341 		goto out;
1342 	}
1344 if (p_hwfn->b_drv_link_init) {
1345 		/* Link indication with modern MFW arrives as per-PF
1346 		 * indication.
1347 		 */
1348 if (p_hwfn->mcp_info->capabilities &
1349 FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1350 struct public_func shmem_info;
1352 			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1353 					       MCP_PF_ID(p_hwfn));
1354 p_link->link_up = !!(shmem_info.status &
1355 FUNC_STATUS_VIRTUAL_LINK_UP);
1356 qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1357 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1358 "Virtual link_up = %d\n", p_link->link_up);
1360 p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1361 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1362 "Physical link_up = %d\n", p_link->link_up);
1365 p_link->link_up = false;
1368 p_link->full_duplex = true;
1369 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1370 	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1371 		p_link->speed = 100000;
1372 		break;
1373 	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1374 		p_link->speed = 50000;
1375 		break;
1376 	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1377 		p_link->speed = 40000;
1378 		break;
1379 	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1380 		p_link->speed = 25000;
1381 		break;
1382 	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1383 		p_link->speed = 20000;
1384 		break;
1385 	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1386 		p_link->speed = 10000;
1387 		break;
1388 	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1389 		p_link->full_duplex = false;
1390 		fallthrough;
1391 	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1392 		p_link->speed = 1000;
1393 		break;
1394 	default:
1395 		p_link->speed = 0;
1396 		p_link->link_up = 0;
1397 	}
1399 if (p_link->link_up && p_link->speed)
1400 		p_link->line_speed = p_link->speed;
1401 	else
1402 		p_link->line_speed = 0;
1404 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1405 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1407 /* Max bandwidth configuration */
1408 __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1410 /* Min bandwidth configuration */
1411 __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1412 qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1413 p_link->min_pf_rate);
1415 p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1416 p_link->an_complete = !!(status &
1417 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1418 p_link->parallel_detection = !!(status &
1419 LINK_STATUS_PARALLEL_DETECTION_USED);
1420 p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1422 p_link->partner_adv_speed |=
1423 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1424 QED_LINK_PARTNER_SPEED_1G_FD : 0;
1425 p_link->partner_adv_speed |=
1426 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1427 QED_LINK_PARTNER_SPEED_1G_HD : 0;
1428 p_link->partner_adv_speed |=
1429 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1430 QED_LINK_PARTNER_SPEED_10G : 0;
1431 p_link->partner_adv_speed |=
1432 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1433 QED_LINK_PARTNER_SPEED_20G : 0;
1434 p_link->partner_adv_speed |=
1435 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1436 QED_LINK_PARTNER_SPEED_25G : 0;
1437 p_link->partner_adv_speed |=
1438 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1439 QED_LINK_PARTNER_SPEED_40G : 0;
1440 p_link->partner_adv_speed |=
1441 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1442 QED_LINK_PARTNER_SPEED_50G : 0;
1443 p_link->partner_adv_speed |=
1444 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1445 QED_LINK_PARTNER_SPEED_100G : 0;
1447 p_link->partner_tx_flow_ctrl_en =
1448 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1449 p_link->partner_rx_flow_ctrl_en =
1450 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1452 switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1453 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1454 p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1456 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1457 p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1459 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1460 p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1463 p_link->partner_adv_pause = 0;
1466 p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1468 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1469 qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1471 	qed_link_update(p_hwfn, p_ptt);
1472 out:
1473 	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1474 }
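/* Design note: link_lock serializes this handler against qed_mcp_set_link(),
 * which deliberately re-invokes it ("mimic link-change attention" below) so
 * that MFW attentions and driver-initiated link changes share one code path.
 */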
1476 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1477 {
1478 	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1479 	struct qed_mcp_mb_params mb_params;
1480 	struct eth_phy_cfg phy_cfg;
1481 	u32 cmd;
1482 	int rc = 0;
1484 /* Set the shmem configuration according to params */
1485 memset(&phy_cfg, 0, sizeof(phy_cfg));
1486 cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1487 if (!params->speed.autoneg)
1488 phy_cfg.speed = params->speed.forced_speed;
1489 phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1490 phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1491 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1492 phy_cfg.adv_speed = params->speed.advertised_speeds;
1493 phy_cfg.loopback_mode = params->loopback_mode;
1495 /* There are MFWs that share this capability regardless of whether
1496 * this is feasible or not. And given that at the very least adv_caps
1497 * would be set internally by qed, we want to make sure LFA would
1500 if ((p_hwfn->mcp_info->capabilities &
1501 FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1502 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1503 if (params->eee.tx_lpi_enable)
1504 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1505 if (params->eee.adv_caps & QED_EEE_1G_ADV)
1506 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1507 if (params->eee.adv_caps & QED_EEE_10G_ADV)
1508 phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1509 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1510 EEE_TX_TIMER_USEC_OFFSET) &
1511 EEE_TX_TIMER_USEC_MASK;
1514 	p_hwfn->b_drv_link_init = b_up;
1516 	if (b_up) {
1517 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1518 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
1519 			   phy_cfg.speed,
1520 			   phy_cfg.pause,
1521 			   phy_cfg.adv_speed,
1522 			   phy_cfg.loopback_mode,
1523 			   phy_cfg.feature_config_flags);
1524 	} else {
1525 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1526 			   "Resetting link\n");
1527 	}
1529 memset(&mb_params, 0, sizeof(mb_params));
1530 mb_params.cmd = cmd;
1531 mb_params.p_data_src = &phy_cfg;
1532 mb_params.data_src_size = sizeof(phy_cfg);
1533 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1535 	/* If the MCP fails to respond we must abort */
1536 	if (rc) {
1537 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1538 		return rc;
1539 	}
1541 	/* Mimic link-change attention, done for several reasons:
1542 	 *  - On reset, there's no guarantee MFW would trigger
1543 	 *    an attention.
1544 	 *  - On initialization, older MFWs might not indicate link change
1545 	 *    during LFA, so we'll never get an UP indication.
1546 	 */
1547 	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1549 	return 0;
1550 }
1552 u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1553 				 struct qed_ptt *p_ptt)
1554 {
1555 u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1557 	if (IS_VF(p_hwfn->cdev))
1558 		return 0;
1560 	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1561 						 PUBLIC_PATH);
1562 path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1563 path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1565 	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1566 			       path_addr +
1567 			       offsetof(struct public_path, process_kill)) &
1568 PROCESS_KILL_COUNTER_MASK;
1570 	return proc_kill_cnt;
1571 }
1573 static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1574 					struct qed_ptt *p_ptt)
1575 {
1576 	struct qed_dev *cdev = p_hwfn->cdev;
1577 	u32 proc_kill_cnt;
1579 /* Prevent possible attentions/interrupts during the recovery handling
1580 	 * and till its load phase, during which they will be re-enabled.
1581 	 */
1582 qed_int_igu_disable_int(p_hwfn, p_ptt);
1584 DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1586 /* The following operations should be done once, and thus in CMT mode
1587 * are carried out by only the first HW function.
1589 	if (p_hwfn != QED_LEADING_HWFN(cdev))
1590 		return;
1592 	if (cdev->recov_in_prog) {
1593 		DP_NOTICE(p_hwfn,
1594 			  "Ignoring the indication since a recovery process is already in progress\n");
1595 		return;
1596 	}
1598 cdev->recov_in_prog = true;
1600 proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1601 DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1603 	qed_schedule_recovery_handler(p_hwfn);
1604 }
1606 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1607 struct qed_ptt *p_ptt,
1608 enum MFW_DRV_MSG_TYPE type)
1610 enum qed_mcp_protocol_type stats_type;
1611 union qed_mcp_protocol_stats stats;
1612 	struct qed_mcp_mb_params mb_params;
1613 	u32 hsi_param;
1615 	switch (type) {
1616 	case MFW_DRV_MSG_GET_LAN_STATS:
1617 		stats_type = QED_MCP_LAN_STATS;
1618 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1619 		break;
1620 	case MFW_DRV_MSG_GET_FCOE_STATS:
1621 		stats_type = QED_MCP_FCOE_STATS;
1622 		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1623 		break;
1624 	case MFW_DRV_MSG_GET_ISCSI_STATS:
1625 		stats_type = QED_MCP_ISCSI_STATS;
1626 		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1627 		break;
1628 	case MFW_DRV_MSG_GET_RDMA_STATS:
1629 		stats_type = QED_MCP_RDMA_STATS;
1630 		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1631 		break;
1632 	default:
1633 		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1634 		return;
1635 	}
1637 qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1639 memset(&mb_params, 0, sizeof(mb_params));
1640 mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1641 mb_params.param = hsi_param;
1642 mb_params.p_data_src = &stats;
1643 mb_params.data_src_size = sizeof(stats);
1644 	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1645 }
1647 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1648 {
1649 	struct qed_mcp_function_info *p_info;
1650 struct public_func shmem_info;
1651 u32 resp = 0, param = 0;
1653 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1655 qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1657 p_info = &p_hwfn->mcp_info->func_info;
1659 qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1660 qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1662 /* Acknowledge the MFW */
1663 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1664 		    &param);
1665 }
1667 static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1668 {
1669 	struct public_func shmem_info;
1670 u32 resp = 0, param = 0;
1672 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1674 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1675 FUNC_MF_CFG_OV_STAG_MASK;
1676 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1677 if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1678 if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1679 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1680 p_hwfn->hw_info.ovlan);
1681 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1683 /* Configure DB to add external vlan to EDPM packets */
1684 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1685 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1686 			       p_hwfn->hw_info.ovlan);
1687 		} else {
1688 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1689 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1690 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1691 			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1692 		}
1693 	}
1694 	qed_sp_pf_update_stag(p_hwfn);
1697 DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1698 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1700 /* Acknowledge the MFW */
1701 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1702 		    &resp, &param);
1703 }
1705 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1706 {
1707 	struct public_func shmem_info;
1708 	u32 port_cfg, val;
1710 	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1711 		return;
1713 memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1714 port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1715 offsetof(struct public_port, oem_cfg_port));
1716 val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1717 OEM_CFG_CHANNEL_TYPE_OFFSET;
1718 	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1719 		DP_NOTICE(p_hwfn,
1720 			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
1721 			  val, MFW_PORT(p_hwfn));
1723 val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1724 if (val == OEM_CFG_SCHED_TYPE_ETS) {
1725 p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1726 } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1727 		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1728 	} else {
1729 		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1730 		DP_NOTICE(p_hwfn,
1731 			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1732 			  val, MFW_PORT(p_hwfn));
1733 	}
1735 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1736 val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1737 OEM_CFG_FUNC_TC_OFFSET;
1738 p_hwfn->ufp_info.tc = (u8)val;
1739 val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1740 OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1741 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1742 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1743 } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1744 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1745 	} else {
1746 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1747 		DP_NOTICE(p_hwfn,
1748 			  "Unknown Host priority control %d port_id 0x%02x\n",
1749 			  val, MFW_PORT(p_hwfn));
1750 	}
1752 	DP_NOTICE(p_hwfn,
1753 		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1754 		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1755 		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1756 }
1758 static int
1759 qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1760 {
1761 qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1763 if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1764 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1765 qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1766 p_hwfn->ufp_info.tc);
1768 qed_qm_reconf(p_hwfn, p_ptt);
1769 } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1770 /* Merge UFP TC with the dcbx TC data */
1771 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1772 					  QED_DCBX_OPERATIONAL_MIB);
1773 	} else {
1774 		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1775 		return -EINVAL;
1776 	}
1778 /* update storm FW with negotiation results */
1779 qed_sp_pf_update_ufp(p_hwfn);
1781 /* update stag pcp value */
1782 	qed_sp_pf_update_stag(p_hwfn);
1784 	return 0;
1785 }
1787 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1788 			  struct qed_ptt *p_ptt)
1789 {
1790 	struct qed_mcp_info *info = p_hwfn->mcp_info;
1791 	int rc = 0;
1792 	bool found = false;
1793 	u16 i;
1795 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1797 /* Read Messages from MFW */
1798 qed_mcp_read_mb(p_hwfn, p_ptt);
1800 /* Compare current messages to old ones */
1801 for (i = 0; i < info->mfw_mb_length; i++) {
1802 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1803 			continue;
1805 		found = true;
1807 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1808 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1809 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1811 		switch (i) {
1812 		case MFW_DRV_MSG_LINK_CHANGE:
1813 			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
1814 			break;
1815 		case MFW_DRV_MSG_VF_DISABLED:
1816 			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
1817 			break;
1818 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1819 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1820 						  QED_DCBX_REMOTE_LLDP_MIB);
1821 			break;
1822 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1823 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1824 						  QED_DCBX_REMOTE_MIB);
1825 			break;
1826 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1827 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1828 						  QED_DCBX_OPERATIONAL_MIB);
1829 			break;
1830 		case MFW_DRV_MSG_OEM_CFG_UPDATE:
1831 			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
1832 			break;
1833 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1834 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1835 			break;
1836 		case MFW_DRV_MSG_ERROR_RECOVERY:
1837 			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
1838 			break;
1839 		case MFW_DRV_MSG_GET_LAN_STATS:
1840 		case MFW_DRV_MSG_GET_FCOE_STATS:
1841 		case MFW_DRV_MSG_GET_ISCSI_STATS:
1842 		case MFW_DRV_MSG_GET_RDMA_STATS:
1843 			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1844 			break;
1845 		case MFW_DRV_MSG_BW_UPDATE:
1846 			qed_mcp_update_bw(p_hwfn, p_ptt);
1847 			break;
1848 		case MFW_DRV_MSG_S_TAG_UPDATE:
1849 			qed_mcp_update_stag(p_hwfn, p_ptt);
1850 			break;
1851 		case MFW_DRV_MSG_GET_TLV_REQ:
1852 			qed_mfw_tlv_req(p_hwfn);
1853 			break;
1854 		default:
1855 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1856 			rc = -EINVAL;
1857 		}
1858 	}
1860 /* ACK everything */
1861 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1862 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
1864 		/* The MFW expects the answer in BE, so force the write in that format */
1865 qed_wr(p_hwfn, p_ptt,
1866 info->mfw_mb_addr + sizeof(u32) +
1867 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1868 		       sizeof(u32) + i * sizeof(u32),
1869 		       val);
1870 	}
1872 	if (!found) {
1873 		DP_NOTICE(p_hwfn,
1874 			  "Received an MFW message indication but no new message!\n");
1875 		rc = -EINVAL;
1876 	}
1878 /* Copy the new mfw messages into the shadow */
1879 	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1881 	return rc;
1882 }
1884 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
1885 struct qed_ptt *p_ptt,
1886 			u32 *p_mfw_ver, u32 *p_running_bundle_id)
1887 {
1888 	u32 global_offsize;
1890 if (IS_VF(p_hwfn->cdev)) {
1891 if (p_hwfn->vf_iov_info) {
1892 struct pfvf_acquire_resp_tlv *p_resp;
1894 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1895 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1896 			return 0;
1897 		} else {
1898 			DP_VERBOSE(p_hwfn,
1899 				   QED_MSG_IOV,
1900 				   "VF requested MFW version prior to ACQUIRE\n");
1901 			return -EINVAL;
1902 		}
1903 	}
1905 global_offsize = qed_rd(p_hwfn, p_ptt,
1906 SECTION_OFFSIZE_ADDR(p_hwfn->
1907 					    mcp_info->public_base,
1908 					    PUBLIC_GLOBAL));
1909 	*p_mfw_ver =
1910 	    qed_rd(p_hwfn, p_ptt,
1911 SECTION_ADDR(global_offsize,
1912 0) + offsetof(struct public_global, mfw_ver));
1914 if (p_running_bundle_id != NULL) {
1915 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
1916 SECTION_ADDR(global_offsize, 0) +
1917 offsetof(struct public_global,
1918 						      running_bundle_id));
1919 	}
1921 	return 0;
1922 }
1924 int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
1925 			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
1926 {
1927 	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
1929 	if (IS_VF(p_hwfn->cdev))
1930 		return -EINVAL;
1932 /* Read the address of the nvm_cfg */
1933 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1934 if (!nvm_cfg_addr) {
1935 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1939 /* Read the offset of nvm_cfg1 */
1940 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1942 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1943 offsetof(struct nvm_cfg1, glob) +
1944 offsetof(struct nvm_cfg1_glob, mbi_version);
1945 	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
1946 			    mbi_ver_addr) &
1947 		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
1948 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
1949 		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
1951 	return 0;
1952 }
1954 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
1955 			   struct qed_ptt *p_ptt, u32 *p_media_type)
1956 {
1957 *p_media_type = MEDIA_UNSPECIFIED;
1959 	if (IS_VF(p_hwfn->cdev))
1960 		return -EINVAL;
1962 if (!qed_mcp_is_init(p_hwfn)) {
1963 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1968 *p_media_type = MEDIA_UNSPECIFIED;
1972 *p_media_type = qed_rd(p_hwfn, p_ptt,
1973 p_hwfn->mcp_info->port_addr +
1974 			       offsetof(struct public_port,
1975 					media_type));
1977 	return 0;
1978 }
1980 int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
1981 struct qed_ptt *p_ptt,
1982 u32 *p_transceiver_state,
1983 				 u32 *p_transceiver_type)
1984 {
1985 u32 transceiver_info;
1987 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
1988 *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
1990 	if (IS_VF(p_hwfn->cdev))
1991 		return -EINVAL;
1993 if (!qed_mcp_is_init(p_hwfn)) {
1994 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1998 transceiver_info = qed_rd(p_hwfn, p_ptt,
1999 p_hwfn->mcp_info->port_addr +
2000 				  offsetof(struct public_port,
2001 					   transceiver_data));
2003 *p_transceiver_state = (transceiver_info &
2004 ETH_TRANSCEIVER_STATE_MASK) >>
2005 ETH_TRANSCEIVER_STATE_OFFSET;
2007 if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2008 *p_transceiver_type = (transceiver_info &
2009 ETH_TRANSCEIVER_TYPE_MASK) >>
2010 ETH_TRANSCEIVER_TYPE_OFFSET;
2011 	else
2012 		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2014 	return 0;
2015 }
2016 static bool qed_is_transceiver_ready(u32 transceiver_state,
2017 u32 transceiver_type)
2019 	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2020 	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2021 	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2022 		return true;
2024 	return false;
2025 }
2027 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2028 			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
2029 {
2030 	u32 transceiver_type, transceiver_state;
2031 	int ret;
2033 	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2034 					   &transceiver_type);
2035 	if (ret)
2036 		return ret;
2038 	if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
2039 	    false)
2040 		return -EINVAL;
2042 switch (transceiver_type) {
2043 case ETH_TRANSCEIVER_TYPE_1G_LX:
2044 case ETH_TRANSCEIVER_TYPE_1G_SX:
2045 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2046 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2047 case ETH_TRANSCEIVER_TYPE_1000BASET:
2048 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2050 case ETH_TRANSCEIVER_TYPE_10G_SR:
2051 case ETH_TRANSCEIVER_TYPE_10G_LR:
2052 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2053 case ETH_TRANSCEIVER_TYPE_10G_ER:
2054 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2055 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2056 case ETH_TRANSCEIVER_TYPE_4x10G:
2057 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2059 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2060 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2061 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2062 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2063 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2064 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2066 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2067 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2068 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2069 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2070 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2072 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2073 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2075 case ETH_TRANSCEIVER_TYPE_25G_SR:
2076 case ETH_TRANSCEIVER_TYPE_25G_LR:
2077 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2078 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2079 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2080 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2081 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2083 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2084 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2085 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2086 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2087 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2088 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2089 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2091 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2092 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2093 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2094 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2095 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2097 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2098 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2100 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2101 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2102 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2103 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2104 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2105 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2106 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2108 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2109 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2110 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2112 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2113 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2114 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2115 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2117 case ETH_TRANSCEIVER_TYPE_XLPPI:
2118 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2120 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2121 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2122 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2125 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2127 *p_speed_mask = 0xff;
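/* Usage sketch (illustrative only): the returned mask is a bitwise OR of
 * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_* values, so a caller can test
 * individual speeds, e.g.
 *
 *	u32 mask = 0;
 *
 *	if (!qed_mcp_trans_speed_mask(p_hwfn, p_ptt, &mask) &&
 *	    (mask & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G))
 *		DP_INFO(p_hwfn, "plugged module can run at 25G\n");
 */
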
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		return -EINVAL;
	}

	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	*p_board_config = qed_rd(p_hwfn, p_ptt,
				 port_cfg_addr +
				 offsetof(struct nvm_cfg1_port,
					  board_cfg));

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There was never a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		*p_proto = QED_PCI_ETH_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		fallthrough;
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

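/* Worked example (illustrative only): the shmem MAC is split across two
 * 32-bit words. With shmem_info.mac_upper = 0x0011 and
 * shmem_info.mac_lower = 0x22334455, the unpacking above yields
 * info->mac = 00:11:22:33:44:55.
 */
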
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

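/* Worked example (illustrative only): the register field encodes the flash
 * size as a power-of-two number of Mbits, and 1 Mbit = 1 << 17 bytes, hence
 * MCP_BYTES_PER_MBIT_SHIFT. A field value of 3 yields
 * 1 << (3 + 17) = 0x100000 bytes, i.e. an 8-Mbit (1-MiB) flash part.
 */
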
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Avoid triggering a recovery since such a process is already in progress\n");
		return -EAGAIN;
	}

	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return 0;
}

#define QED_RECOVERY_PROLOG_SLEEP_MS	100

int qed_recovery_prolog(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
	int rc;

	/* Allow ongoing PCIe transactions to complete */
	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}

static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only the leader can configure MSI-X, and CMT must be
	 * taken into account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

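/* Usage sketch (illustrative only): callers go through the wrapper, which
 * hides the BB/AH difference. Note that on a BB CMT device with two hwfns,
 * a request for 4 SBs per VF is doubled to 8 before reaching the MFW:
 *
 *	qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf_id, 4);
 */
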
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

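/* Usage sketch (illustrative only; the literal version value and string are
 * made up, and the name field size is assumed per struct qed_mcp_drv_version):
 * each 4-byte chunk of the name is byte-swapped into a big-endian word above
 * so the MFW, which reads SHMEM as 32-bit big-endian, recovers the string.
 *
 *	struct qed_mcp_drv_version ver = { .version = 0x1 };
 *
 *	strscpy(ver.name, "example-drv", sizeof(ver.name));
 *	qed_mcp_send_drv_version(p_hwfn, p_ptt, &ver);
 */
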
/* A maximum of 100 msec of waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS		10
#define QED_MCP_HALT_MAX_RETRIES	10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, true);

	return 0;
}

#define QED_MCP_RESUME_SLEEP_MS	10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, false);

	return 0;
}

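/* Usage sketch (illustrative only): halt/resume are intended to bracket
 * operations that must run while the management CPU is stopped, e.g.
 *
 *	if (!qed_mcp_halt(p_hwfn, p_ptt)) {
 *		// ... access MCP-owned state ...
 *		qed_mcp_resume(p_hwfn, p_ptt);
 *	}
 */
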
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

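/* Worked example (illustrative only): for mac = 00:11:22:33:44:55 the
 * packing above produces mfw_mac[0] = 0x00112233 and
 * mfw_mac[1] = 0x44550000, i.e. each MAC byte sits in the high-to-low
 * positions of a native 32-bit word, matching the MFW's big-endian
 * 32-bit view of SHMEM.
 */
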
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

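/* Usage sketch (illustrative only; buffer size and nvm_addr are made up):
 * reads are transparently split into MCP_DRV_NVM_BUF_LEN-sized mailbox
 * transactions, so a caller can pull an arbitrary span in one call:
 *
 *	u8 *buf = kzalloc(SZ_4K, GFP_KERNEL);
 *
 *	if (buf && !qed_mcp_nvm_read(cdev, nvm_addr, buf, SZ_4K))
 *		// ... parse buf ...
 */
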
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	switch (cmd) {
	case QED_PUT_FILE_BEGIN:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
		break;
	case QED_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case QED_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
		rc = -EINVAL;
		goto out;
	}

	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
	while (buf_idx < len) {
		if (cmd == QED_PUT_FILE_BEGIN)
			nvm_offset = addr;
		else
			nvm_offset = ((buf_size <<
				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
				       buf_idx;
		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					&resp, &param, buf_size,
					(u32 *)&p_buf[buf_idx]);
		if (rc) {
			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(cdev,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = -EINVAL;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
			usleep_range(1000, 2000);

		/* For MBI upgrade, MFW response includes the next buffer
		 * offset to be delivered to MFW.
		 */
		if (param && cmd == QED_PUT_FILE_DATA) {
			buf_idx = QED_MFW_GET_FIELD(param,
					FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
			buf_size = QED_MFW_GET_FIELD(param,
					 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
		} else {
			buf_idx += buf_size;
			buf_size = min_t(u32, (len - buf_idx),
					 MCP_DRV_NVM_BUF_LEN);
		}
	}

	cdev->mcp_nvm_resp = resp;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
	u32 resp, param;
	int rc;

	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;

	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left,
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_TRANSCEIVER_READ,
					nvm_offset, &resp, &param, &buf_size,
					(u32 *)(p_buf + offset));
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return -ENODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return -EINVAL;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return 0;
}

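/* Usage sketch (illustrative only): dumping the first 256 bytes of a
 * module's A0 page (I2C address 0xa0) on the hwfn's own port:
 *
 *	u8 eeprom[256];
 *	int rc;
 *
 *	rc = qed_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
 *				  0xa0, 0, sizeof(eeprom), eeprom);
 */
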
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
	struct qed_nvm_image_info nvm_info;
	struct qed_ptt *p_ptt;
	int rc;
	u32 i;

	if (p_hwfn->nvm_info.valid)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "failed to acquire ptt\n");
		return -EBUSY;
	}

	/* Acquire from MFW the amount of available images */
	nvm_info.num_images = 0;
	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
					     p_ptt, &nvm_info.num_images);
	if (rc == -EOPNOTSUPP) {
		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
		goto out;
	} else if (rc || !nvm_info.num_images) {
		DP_ERR(p_hwfn, "Failed getting number of images\n");
		goto err0;
	}

	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
					   sizeof(struct bist_nvm_image_att),
					   GFP_KERNEL);
	if (!nvm_info.image_att) {
		rc = -ENOMEM;
		goto err0;
	}

	/* Iterate over images and get their attributes */
	for (i = 0; i < nvm_info.num_images; i++) {
		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
						    &nvm_info.image_att[i], i);
		if (rc) {
			DP_ERR(p_hwfn,
			       "Failed getting image index %d attributes\n", i);
			goto err1;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
			   nvm_info.image_att[i].len);
	}
out:
	/* Update hwfn's nvm_info */
	if (nvm_info.num_images) {
		p_hwfn->nvm_info.num_images = nvm_info.num_images;
		kfree(p_hwfn->nvm_info.image_att);
		p_hwfn->nvm_info.image_att = nvm_info.image_att;
		p_hwfn->nvm_info.valid = true;
	}

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err1:
	kfree(nvm_info.image_att);
err0:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	enum nvm_image_type type;
	u32 i;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case QED_NVM_IMAGE_NVM_CFG1:
		type = NVM_TYPE_NVM_CFG1;
		break;
	case QED_NVM_IMAGE_DEFAULT_CFG:
		type = NVM_TYPE_DEFAULT_CFG;
		break;
	case QED_NVM_IMAGE_NVM_META:
		type = NVM_TYPE_META;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	qed_mcp_nvm_info_populate(p_hwfn);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -ENOENT;
	}

	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}

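/* Usage sketch (illustrative only; buffer size is made up): the attribute
 * lookup above guarantees the supplied buffer can hold the stored image:
 *
 *	u8 *cfg = kzalloc(SZ_4K, GFP_KERNEL);
 *
 *	if (cfg && !qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_ISCSI_CFG,
 *					  cfg, SZ_4K))
 *		// ... consume the iSCSI config image ...
 */
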
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		fallthrough;
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		rc = -EINVAL;
	}

	return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		fallthrough;
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since
		 * another entity is not expected to release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}

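/* Usage sketch (illustrative only): a lock/unlock pair built from the
 * default initializer; QED_RESC_LOCK_PTP_PORT0 stands in for any
 * qed_resc_lock value a caller might use:
 *
 *	struct qed_resc_unlock_params unlock;
 *	struct qed_resc_lock_params lock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock) && lock.b_granted) {
 *		// ... touch the MFW-arbitrated resource ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */
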
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}