1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
/* Mailbox handler for messages with an unrecognized TLV ID.
 * Logs the offending message ID and originating VF, then defers to the
 * generic TLV error handler for the actual mailbox bookkeeping.
 * NOTE(review): the cast below recovers the owning VF from its embedded
 * mailbox -- presumably mbx is the first member of fm10k_vf_info; confirm
 * against the struct layout.
 */
8 static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
9 struct fm10k_mbx_info *mbx)
11 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
12 struct fm10k_intfc *interface = hw->back;
13 struct pci_dev *pdev = interface->pdev;
/* report the TLV message ID and the VF index it came from */
15 dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
16 **results & FM10K_TLV_ID_MASK, vf_info->vf_idx);
18 return fm10k_tlv_msg_error(hw, results, mbx);
22 * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
23 * @hw: Pointer to hardware structure
24 * @results: Pointer array to message, results[0] is pointer to message
25 * @mbx: Pointer to mailbox information structure
27 * This function is a custom handler for MAC/VLAN requests from the VF. The
28 * assumption is that it is acceptable to directly hand off the message from
29 * the VF to the PF's switch manager. However, we use a MAC/VLAN message
30 * queue to avoid overloading the mailbox when a large number of requests
33 static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
34 struct fm10k_mbx_info *mbx)
/* NOTE(review): as with fm10k_iov_msg_error, the VF is recovered by casting
 * its embedded mailbox back to the containing fm10k_vf_info.
 */
36 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
37 struct fm10k_intfc *interface = hw->back;
45 /* we shouldn't be updating rules on a disabled interface */
46 if (!FM10K_VF_FLAG_ENABLED(vf_info))
47 err = FM10K_ERR_PARAM;
/* Each optional TLV section below is gated on !err so that an earlier
 * failure short-circuits the remaining sections.
 */
49 if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
50 result = results[FM10K_MAC_VLAN_MSG_VLAN];
52 /* record VLAN id requested */
53 err = fm10k_tlv_attr_get_u32(result, &vid);
/* FM10K_VLAN_CLEAR distinguishes add (bit clear) from delete requests */
57 set = !(vid & FM10K_VLAN_CLEAR);
58 vid &= ~FM10K_VLAN_CLEAR;
60 /* if the length field has been set, this is a multi-bit
61 * update request. For multi-bit requests, simply disallow
62 * them when the pf_vid has been set. In this case, the PF
63 * should have already cleared the VLAN_TABLE, and if we
64 * allowed them, it could allow a rogue VF to receive traffic
65 * on a VLAN it was not assigned. In the single-bit case, we
66 * need to modify requests for VLAN 0 to use the default PF or
67 * SW vid when assigned.
71 /* prevent multi-bit requests when PF has
72 * administratively set the VLAN for this VF
75 return FM10K_ERR_PARAM;
77 err = fm10k_iov_select_vid(vf_info, (u16)vid);
84 /* update VSI info for VF in regards to VLAN table */
85 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
88 if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
89 result = results[FM10K_MAC_VLAN_MSG_MAC];
91 /* record unicast MAC address requested */
92 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
96 /* block attempts to set MAC for a locked device */
97 if (is_valid_ether_addr(vf_info->mac) &&
98 !ether_addr_equal(mac, vf_info->mac))
99 return FM10K_ERR_PARAM;
101 set = !(vlan & FM10K_VLAN_CLEAR);
102 vlan &= ~FM10K_VLAN_CLEAR;
104 err = fm10k_iov_select_vid(vf_info, vlan);
110 /* Add this request to the MAC/VLAN queue */
111 err = fm10k_queue_mac_request(interface, vf_info->glort,
115 if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
116 result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
118 /* record multicast MAC address requested */
119 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
123 /* verify that the VF is allowed to request multicast */
124 if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
125 return FM10K_ERR_PARAM;
127 set = !(vlan & FM10K_VLAN_CLEAR);
128 vlan &= ~FM10K_VLAN_CLEAR;
130 err = fm10k_iov_select_vid(vf_info, vlan);
136 /* Add this request to the MAC/VLAN queue */
137 err = fm10k_queue_mac_request(interface, vf_info->glort,
/* Dispatch table mapping VF mailbox message IDs to their PF-side handlers;
 * installed per-VF via fm10k_pfvf_mbx_init() in fm10k_iov_alloc_data().
 * Unmatched IDs fall through to fm10k_iov_msg_error().
 */
144 static const struct fm10k_msg_data iov_mbx_data[] = {
145 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
146 FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
147 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
148 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
149 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
/* Service VF function-level-reset (VFLR) events: bail out early when SR-IOV
 * is not active or no VFLR interrupt is pending, then read both PFVFLRE
 * registers to build a per-VF reset bitmap and reinitialize each reset VF.
 */
152 s32 fm10k_iov_event(struct fm10k_intfc *interface)
154 struct fm10k_hw *hw = &interface->hw;
155 struct fm10k_iov_data *iov_data;
159 /* if there is no iov_data then there is no mailbox to process */
160 if (!READ_ONCE(interface->iov_data))
165 iov_data = interface->iov_data;
167 /* check again now that we are in the RCU block */
171 if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
174 /* read VFLRE to determine if any VFs have been reset */
175 vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
177 vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0))
179 i = iov_data->num_vfs;
/* Walk VFs from the highest index down; "vflre += vflre" doubles the
 * value, i.e. shifts the bitmap left one position per iteration.  The
 * "<<= 64 - i" pre-shift suggests vflre is 64 bits wide -- TODO confirm
 * against the (elided) declaration.
 */
181 for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
182 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* NOTE(review): the per-VF bit test lives in elided lines; only the
 * reset-and-reconnect path is visible here.
 */
187 hw->iov.ops.reset_resources(hw, vf_info);
188 vf_info->mbx.ops.connect(hw, &vf_info->mbx);
/* Process pending VF mailboxes under the interface mailbox lock.  Iterates
 * VFs in reverse, resuming from next_vf_mbx so a VF skipped on the previous
 * pass (because the SM mailbox filled up) is serviced first this time.
 */
197 s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
199 struct fm10k_hw *hw = &interface->hw;
200 struct fm10k_iov_data *iov_data;
203 /* if there is no iov_data then there is no mailbox to process */
204 if (!READ_ONCE(interface->iov_data))
209 iov_data = interface->iov_data;
211 /* check again now that we are in the RCU block */
215 /* lock the mailbox for transmit and receive */
216 fm10k_mbx_lock(interface);
218 /* Most VF messages sent to the PF cause the PF to respond by
219 * requesting from the SM mailbox. This means that too many VF
220 * messages processed at once could cause a mailbox timeout on the PF.
221 * To prevent this, store a pointer to the next VF mbx to process. Use
222 * that as the start of the loop so that we don't starve whichever VF
223 * got ignored on the previous run.
226 for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
227 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
228 struct fm10k_mbx_info *mbx = &vf_info->mbx;
229 u16 glort = vf_info->glort;
231 /* process the SM mailbox first to drain outgoing messages */
232 hw->mbx.ops.process(hw, &hw->mbx);
234 /* verify port mapping is valid, if not reset port */
235 if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
236 hw->iov.ops.reset_lport(hw, vf_info);
237 fm10k_clear_macvlan_queue(interface, glort, false);
240 /* reset VFs that have mailbox timed out */
/* NOTE(review): the timeout test itself is in elided lines */
242 hw->iov.ops.reset_resources(hw, vf_info);
243 mbx->ops.connect(hw, mbx);
246 /* guarantee we have free space in the SM mailbox */
247 if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
248 /* keep track of how many times this occurs */
249 interface->hw_sm_mbx_full++;
251 /* make sure we try again momentarily */
252 fm10k_service_event_schedule(interface);
/* presumably the loop breaks here so next_vf_mbx records where
 * to resume -- the break statement is in elided lines
 */
257 /* cleanup mailbox and process received messages */
258 mbx->ops.process(hw, mbx);
261 /* if we stopped processing mailboxes early, update next_vf_mbx.
262 * Otherwise, reset next_vf_mbx, and restart loop so that we process
263 * the remaining mailboxes we skipped at the start.
266 iov_data->next_vf_mbx = i + 1;
267 } else if (iov_data->next_vf_mbx) {
268 iov_data->next_vf_mbx = 0;
273 fm10k_mbx_unlock(interface);
/* Suspend SR-IOV activity: tear down the VF RSS DGLORT mapping, then stop
 * every active VF by releasing its resources, disabling its logical port,
 * and dropping any queued MAC/VLAN requests for its glort.  Safe to call
 * with SR-IOV inactive (num_vfs computes to 0).
 */
281 void fm10k_iov_suspend(struct pci_dev *pdev)
283 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
284 struct fm10k_iov_data *iov_data = interface->iov_data;
285 struct fm10k_hw *hw = &interface->hw;
288 /* pull out num_vfs from iov_data */
289 num_vfs = iov_data ? iov_data->num_vfs : 0;
291 /* shut down queue mapping for VFs */
292 fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
293 FM10K_DGLORTMAP_NONE);
295 /* Stop any active VFs and reset their resources */
296 for (i = 0; i < num_vfs; i++) {
297 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
299 hw->iov.ops.reset_resources(hw, vf_info);
300 hw->iov.ops.reset_lport(hw, vf_info);
301 fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
/* Resume SR-IOV after a suspend: reassign hardware queue/interrupt
 * resources, rebuild the VF RSS DGLORT mapping, then bring each VF back up
 * (glort assignment, default MAC/VLAN, mailbox connect).
 */
305 int fm10k_iov_resume(struct pci_dev *pdev)
307 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
308 struct fm10k_iov_data *iov_data = interface->iov_data;
309 struct fm10k_dglort_cfg dglort = { 0 };
310 struct fm10k_hw *hw = &interface->hw;
313 /* pull out num_vfs from iov_data */
314 num_vfs = iov_data ? iov_data->num_vfs : 0;
316 /* return error if iov_data is not already populated */
320 /* allocate hardware resources for the VFs */
321 hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
323 /* configure DGLORT mapping for RSS */
324 dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
325 dglort.idx = fm10k_dglort_vf_rss;
326 dglort.inner_rss = 1;
/* fls(x - 1) yields log2 rounded up, i.e. the field widths in bits */
327 dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
328 dglort.queue_b = fm10k_vf_queue_index(hw, 0);
329 dglort.vsi_l = fls(hw->iov.total_vfs - 1);
332 hw->mac.ops.configure_dglort_map(hw, &dglort);
334 /* assign resources to the device */
335 for (i = 0; i < num_vfs; i++) {
336 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
338 /* allocate all but the last GLORT to the VFs */
339 if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
342 /* assign GLORT to VF, and restrict it to multicast */
343 hw->iov.ops.set_lport(hw, vf_info, i,
344 FM10K_VF_FLAG_MULTI_CAPABLE);
346 /* mailbox is disconnected so we don't send a message */
347 hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
349 /* now we are ready so we can connect */
350 vf_info->mbx.ops.connect(hw, &vf_info->mbx);
/* Handle a port-VLAN update for the given glort.  Derives the VF index from
 * the glort, validates it belongs to one of our VFs, and if the switch vid
 * actually changed, records it and pushes a fresh default MAC/VLAN to the
 * VF (which notifies it via mailbox).
 */
356 s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
358 struct fm10k_iov_data *iov_data = interface->iov_data;
359 struct fm10k_hw *hw = &interface->hw;
360 struct fm10k_vf_info *vf_info;
361 u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;
363 /* no IOV support, not our message to process */
365 return FM10K_ERR_PARAM;
367 /* glort outside our range, not our message to process */
368 if (vf_idx >= iov_data->num_vfs)
369 return FM10K_ERR_PARAM;
371 /* determine if an update has occurred and if so notify the VF */
372 vf_info = &iov_data->vf_info[vf_idx];
373 if (vf_info->sw_vid != pvid) {
374 vf_info->sw_vid = pvid;
375 hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
/* Release all SR-IOV state: reclaim hardware resources via suspend, then
 * free iov_data.  kfree_rcu() defers the free past a grace period so
 * readers that snapshot interface->iov_data (e.g. fm10k_iov_mbx) stay safe.
 */
381 static void fm10k_iov_free_data(struct pci_dev *pdev)
383 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
385 if (!interface->iov_data)
388 /* reclaim hardware resources */
389 fm10k_iov_suspend(pdev);
391 /* drop iov_data from interface */
392 kfree_rcu(interface->iov_data, rcu);
393 interface->iov_data = NULL;
/* Allocate and initialize per-VF state for @num_vfs VFs, then bring the
 * hardware online via fm10k_iov_resume().  iov_data ends in a flexible
 * array of vf_info, hence the offsetof()-based sizing.
 */
396 static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
398 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
399 struct fm10k_iov_data *iov_data = interface->iov_data;
400 struct fm10k_hw *hw = &interface->hw;
404 /* return error if iov_data is already populated */
408 /* The PF should always be able to assign resources */
409 if (!hw->iov.ops.assign_resources)
412 /* nothing to do if no VFs are requested */
416 /* allocate memory for VF storage */
417 size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
418 iov_data = kzalloc(size, GFP_KERNEL);
422 /* record number of VFs */
423 iov_data->num_vfs = num_vfs;
425 /* loop through vf_info structures initializing each entry */
426 for (i = 0; i < num_vfs; i++) {
427 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
429 /* Record VF VSI value */
/* VSI numbering starts at 1 -- presumably VSI 0 belongs to the PF;
 * TODO confirm against the hardware documentation
 */
430 vf_info->vsi = i + 1;
433 /* initialize mailbox memory */
434 err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
437 "Unable to initialize SR-IOV mailbox\n");
443 /* assign iov_data to interface */
444 interface->iov_data = iov_data;
446 /* allocate hardware resources for the VFs */
447 fm10k_iov_resume(pdev);
/* Disable SR-IOV entirely: warn if VFs are still assigned to guests, then
 * tear down the PCI VFs and free all iov_data state.
 */
452 void fm10k_iov_disable(struct pci_dev *pdev)
454 if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
456 "Cannot disable SR-IOV while VFs are assigned\n");
458 pci_disable_sriov(pdev);
460 fm10k_iov_free_data(pdev);
/* Downgrade PCIe AER "completer abort" errors from fatal severity by
 * clearing the corresponding bit in the uncorrectable-error severity
 * register.  Needed because VFs can trigger completer aborts by reading
 * queues they do not own (see fm10k_iov_configure).
 */
463 static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
468 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
472 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
473 err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
474 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
/* sriov_configure callback: change the number of enabled VFs.  Refuses to
 * change the count while VFs are assigned to guests (keeps current_vfs),
 * otherwise tears down the old configuration, reallocates iov_data for the
 * new count, and enables the PCI VFs.
 */
477 int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
479 int current_vfs = pci_num_vf(pdev);
482 if (current_vfs && pci_vfs_assigned(pdev)) {
484 "Cannot modify SR-IOV while VFs are assigned\n");
485 num_vfs = current_vfs;
487 pci_disable_sriov(pdev);
488 fm10k_iov_free_data(pdev);
491 /* allocate resources for the VFs */
492 err = fm10k_iov_alloc_data(pdev, num_vfs);
496 /* allocate VFs if not already allocated */
497 if (num_vfs && num_vfs != current_vfs) {
498 /* Disable completer abort error reporting as
499 * the VFs can trigger this any time they read a queue
500 * that they don't own.
502 fm10k_disable_aer_comp_abort(pdev);
504 err = pci_enable_sriov(pdev, num_vfs);
507 "Enable PCI SR-IOV failed: %d\n", err);
/* Re-provision a single VF after an administrative change (new MAC or
 * port VLAN): under the mailbox lock, disable its logical port (clearing
 * switch rules), drop any queued MAC/VLAN requests, assign the new default
 * MAC+VLAN, and re-enable the port.
 */
515 static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
516 struct fm10k_vf_info *vf_info)
518 struct fm10k_hw *hw = &interface->hw;
520 /* assigning the MAC address will send a mailbox message */
521 fm10k_mbx_lock(interface);
523 /* disable LPORT for this VF which clears switch rules */
524 hw->iov.ops.reset_lport(hw, vf_info);
526 fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
528 /* assign new MAC+VLAN for this VF */
529 hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
531 /* re-enable the LPORT for this VF */
532 hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
533 FM10K_VF_FLAG_MULTI_CAPABLE);
535 fm10k_mbx_unlock(interface);
/* ndo_set_vf_mac callback: administratively set a VF's MAC address.
 * Accepts either a valid unicast MAC or the all-zero address (which clears
 * the administrative MAC), records it, and re-provisions the VF.
 */
538 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
540 struct fm10k_intfc *interface = netdev_priv(netdev);
541 struct fm10k_iov_data *iov_data = interface->iov_data;
542 struct fm10k_vf_info *vf_info;
544 /* verify SR-IOV is active and that vf idx is valid */
545 if (!iov_data || vf_idx >= iov_data->num_vfs)
548 /* verify MAC addr is valid */
549 if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
552 /* record new MAC address */
553 vf_info = &iov_data->vf_info[vf_idx];
554 ether_addr_copy(vf_info->mac, mac);
556 fm10k_reset_vf_info(interface, vf_info);
/* ndo_set_vf_vlan callback: administratively set a VF's port VLAN.  QoS
 * priorities and non-802.1Q protocols are unsupported.  On a real change,
 * records the new pf_vid, wipes the VF's VLAN table (blocking any VLANs the
 * VF had self-assigned), and re-provisions the VF.
 */
561 int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
562 u8 qos, __be16 vlan_proto)
564 struct fm10k_intfc *interface = netdev_priv(netdev);
565 struct fm10k_iov_data *iov_data = interface->iov_data;
566 struct fm10k_hw *hw = &interface->hw;
567 struct fm10k_vf_info *vf_info;
569 /* verify SR-IOV is active and that vf idx is valid */
570 if (!iov_data || vf_idx >= iov_data->num_vfs)
573 /* QOS is unsupported and VLAN IDs accepted range 0-4094 */
574 if (qos || (vid > (VLAN_VID_MASK - 1)))
577 /* VF VLAN Protocol part to default is unsupported */
578 if (vlan_proto != htons(ETH_P_8021Q))
579 return -EPROTONOSUPPORT;
581 vf_info = &iov_data->vf_info[vf_idx];
583 /* exit if there is nothing to do */
584 if (vf_info->pf_vid == vid)
587 /* record default VLAN ID for VF */
588 vf_info->pf_vid = vid;
590 /* Clear the VLAN table for the VF */
591 hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
593 fm10k_reset_vf_info(interface, vf_info);
/* ndo_set_vf_rate callback: set a VF's maximum Tx rate (Mb/s).  min_rate is
 * unsupported and ignored; max_rate is range-checked against the hardware
 * traffic-class limits, recorded, and pushed to hardware via configure_tc.
 */
598 int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
599 int __always_unused min_rate, int max_rate)
601 struct fm10k_intfc *interface = netdev_priv(netdev);
602 struct fm10k_iov_data *iov_data = interface->iov_data;
603 struct fm10k_hw *hw = &interface->hw;
605 /* verify SR-IOV is active and that vf idx is valid */
606 if (!iov_data || vf_idx >= iov_data->num_vfs)
609 /* rate limit cannot be less than 10Mbs or greater than link speed */
611 (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
615 iov_data->vf_info[vf_idx].rate = max_rate;
617 /* update hardware configuration */
618 hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
623 int fm10k_ndo_get_vf_config(struct net_device *netdev,
624 int vf_idx, struct ifla_vf_info *ivi)
626 struct fm10k_intfc *interface = netdev_priv(netdev);
627 struct fm10k_iov_data *iov_data = interface->iov_data;
628 struct fm10k_vf_info *vf_info;
630 /* verify SR-IOV is active and that vf idx is valid */
631 if (!iov_data || vf_idx >= iov_data->num_vfs)
634 vf_info = &iov_data->vf_info[vf_idx];
637 ivi->max_tx_rate = vf_info->rate;
638 ivi->min_tx_rate = 0;
639 ether_addr_copy(ivi->mac, vf_info->mac);
640 ivi->vlan = vf_info->pf_vid;