// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicate to VF whether its request succeeded (0) or failed.
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %d exceeds max len %d\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_data_len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}
	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	enum hnae3_reset_type reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

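/* hclge_free_vector_ring_chain: free every node allocated by
 * hclge_get_ring_chain_from_mbx; the head node itself is caller-owned
 * and is not freed here.
 */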
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[1]: vector id, consumed by the caller rather than this function
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
		HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S,
			req->msg[5]);

	cur_chain = ring_chain;
	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

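/* hclge_map_unmap_ring_to_vf_vector: parse the ring chain carried in the
 * mailbox request and bind it to (or unbind it from) the VF vector given
 * in msg[1].
 */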
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

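/* hclge_set_vf_promisc_mode: a VF may only toggle broadcast promiscuous;
 * unicast/multicast promiscuous stay disabled for the vport.
 */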
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_bc = req->msg[1] ? true : false;
	struct hclge_promisc_param param;

	/* VF is not allowed to enable unicast/multicast promiscuous mode */
	hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}

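/* hclge_set_vf_uc_mac_addr: add/remove/modify a VF unicast MAC entry and
 * mirror the change into the vport's software MAC table. A response is
 * generated only when the VF asked for one.
 */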
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status) {
			hclge_add_uc_addr_common(vport, old_addr);
		} else {
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
		}
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}

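/* hclge_set_vf_mc_mac_addr: add/remove a VF multicast MAC entry and mirror
 * the change into the vport's software MAC table.
 */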
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	u8 resp_len = 0;
	u8 resp_data;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_MC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_MC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status,
				     &resp_data, resp_len);

	return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HLCGE_MBX_PUSH_VLAN_INFO, vfid);
}

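/* hclge_set_vf_vlan_cfg: handle the VF VLAN mailbox subcodes: VLAN filter
 * add/kill, RX VLAN tag stripping, and port based VLAN configuration.
 */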
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_vf_vlan_cfg *msg_cmd;
	int status = 0;

	msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!msg_cmd->is_kill;
		vlan = msg_cmd->vlan;
		proto = msg_cmd->proto;
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = msg_cmd->is_kill ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
		struct hclge_vlan_info *vlan_info;
		u16 *state;

		state = (u16 *)&mbx_req->msg[2];
		vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
		status = hclge_update_port_base_vlan_cfg(vport, *state,
							 vlan_info);
	} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
		u8 state;

		state = vport->port_base_vlan_cfg.state;
		status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
					      sizeof(u8));
	}

	return status;
}

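/* hclge_set_vf_alive: start or stop the vport according to the alive flag
 * the VF reported in msg[2].
 */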
static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      bool gen_resp)
{
	bool alive = !!mbx_req->msg[2];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u8 vf_tc_map = 0;
	int i, ret;

	for (i = 0; i < kinfo->num_tc; i++)
		vf_tc_map |= BIT(i);

	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
				   sizeof(vf_tc_map));

	return ret;
}

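/* hclge_get_vf_queue_info: reply with the VF's tqp count, rss size and rx
 * buffer length, packed as three consecutive u16 values.
 */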
static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}

static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
	u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_DEPTH_INFO_LEN);
}

static int hclge_get_vf_media_type(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_data[2];

	resp_data[0] = hdev->hw.mac.media_type;
	resp_data[1] = hdev->hw.mac.module_type;
	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    sizeof(resp_data));
}

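/* hclge_get_link_info: push the current link status, speed and duplex to
 * the requesting VF via a LINK_STAT_CHANGE mailbox message.
 */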
static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	link_status = (u16)hdev->hw.mac.link;
	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

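/* hclge_get_link_mode: send the first word of either the supported or the
 * advertised link mode mask to the VF, selected by msg[2].
 */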
static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10];
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg[2];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset complete */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}

static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
		 vport->vport_id);

	ret = hclge_func_reset_cmd(hdev, vport->vport_id);
	hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	int ret;
	u32 mtu;

	memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
	ret = hclge_set_vport_mtu(vport, mtu);

	return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id, qid_in_pf;
	u8 resp_data[2];

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}

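/* hclge_get_rss_key: return one 8-byte chunk of the PF's RSS hash key;
 * the VF fetches the full key over several requests, with msg[2] as the
 * chunk index.
 */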
static int hclge_get_rss_key(struct hclge_vport *vport,
			     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg[2];

	memcpy(&resp_data[0],
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_RSS_MBX_RESP_LEN);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

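/* hclge_mbx_handler: main entry for VF-to-PF mailbox processing; drains
 * the command receive queue (CRQ) and dispatches each request by opcode.
 */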
void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	int ret, flag;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %d\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}
		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			ret = hclge_get_vf_queue_depth(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q depth for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport, req);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			ret = hclge_get_queue_id_in_pf(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get qid for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			ret = hclge_get_rss_key(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get rss key for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
			mutex_lock(&hdev->vport_cfg_mutex);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, true);
			mutex_unlock(&hdev->vport_cfg_mutex);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			ret = hclge_get_vf_media_type(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get media type for VF\n",
					ret);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %d\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}