/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
#include "ef10_sriov.h"
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

struct efx_ef10_vlan {
	struct list_head list;

enum efx_ef10_default_filters {
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];

struct efx_ef10_dev_addr {

struct efx_ef10_filter_table {
	/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;
		unsigned long spec;	/* pointer to spec plus flag bits */
	/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
	 * used to mark and sweep MAC filters for the device address lists.
	 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	wait_queue_head_t waitq;
	/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	struct list_head vlan_list;

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
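
/* Illustrative example (editor's note, not part of the original driver):
 * the three helpers above pack a priority band and a table index into a
 * single u32 and unpack it again.  With HUNT_FILTER_TBL_ROWS == 8192:
 *
 *	efx_ef10_make_filter_id(1, 5)        == 1 * 16384 + 5 == 16389
 *	efx_ef10_filter_get_unsafe_pri(16389) == 16389 / 16384 == 1
 *	efx_ef10_filter_get_unsafe_id(16389)  == 16389 & 8191  == 5
 */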
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
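
/* Editor's note: the MC exposes its warm boot count through
 * ER_DZ_BIU_MC_SFT_STATUS.  The upper 16 bits must read the magic value
 * 0xb007 (presumably "boot") for the lower 16 bits to be treated as a
 * valid count; anything else - e.g. while the MC is mid-reboot - is
 * reported as -EIO so callers know to retry.
 */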
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);

static bool efx_ef10_is_vf(struct efx_nic *efx)
	return efx->type->is_vf;

static int efx_ef10_get_pf_index(struct efx_nic *efx)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (outlen < sizeof(outbuf))

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (outlen < sizeof(outbuf))

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
		/* Firmware without GET_WORKAROUNDS - not a problem. */
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
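
/* Worked example (editor's note, values hypothetical): if the MC reported
 * TMR_REG_NS_PER_COUNT == 100 and TMR_REG_MAX_COUNT == 65535, the code
 * above would set timer_quantum_ns = 100 and
 * timer_max_ns = 65535 * 100 == 6553500 ns, i.e. interrupt moderation
 * could then be programmed up to ~6.5 ms in 100 ns steps.
 */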
static int efx_ef10_get_timer_config(struct efx_nic *efx)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);

	rc = efx_ef10_get_timer_workarounds(efx);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
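
		/* Editor's note: efx_ef10_get_sysclk_freq() appears to
		 * return the system clock frequency in MHz, so the
		 * quantum works out as 1536 clock cycles expressed in
		 * ns: e.g. a hypothetical 800 MHz sysclk would give
		 * 1536000 / 800 == 1920 ns per timer tick.
		 */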
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
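
/* Editor's note: these two show functions surface the MCDI function
 * flags through sysfs.  With the files created on the PCI device (see
 * device_create_file() in efx_ef10_probe() below) they can be read as,
 * for example:
 *
 *	cat /sys/bus/pci/devices/<bdf>/link_control_flag
 *	cat /sys/bus/pci/devices/<bdf>/primary_flag
 */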
static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with VLAN filtering feature.
		 */
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
			goto fail_filter_add_vlan;

	mutex_unlock(&nic_data->vlan_lock);

fail_filter_add_vlan:
	list_del(&vlan->list);
	mutex_unlock(&nic_data->vlan_lock);

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);

	list_del(&vlan->list);

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		efx_ef10_del_vlan_internal(efx, vlan);

	mutex_unlock(&nic_data->vlan_lock);

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
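
/* Editor's note on locking: the VLAN add path above nests locks as
 * vlan_lock -> mac_lock -> filter_sem, while the del/cleanup paths nest
 * vlan_lock -> filter_sem, so callers must not already hold any of
 * these when calling efx_ef10_add_vlan()/efx_ef10_del_vlan().
 */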
static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  efx_ef10_mem_map_size(efx) /
				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0))

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	rc = efx_ef10_get_warm_boot_count(efx);
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);

	rc = efx_ef10_get_pf_index(efx);

	rc = efx_ef10_init_datapath_caps(efx);

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);

	rc = efx_ef10_get_timer_config(efx);

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)

	efx_ptp_probe(efx, NULL);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if 8021q module is loaded,
	 * but we can't rely on it since module may be not loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);
	efx_mcdi_mon_remove(efx);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
	efx_mcdi_detach(efx);
	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	efx->nic_data = NULL;

static int efx_ef10_free_vis(struct efx_nic *efx)
	MCDI_DECLARE_BUF_ERR(outbuf);
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,

static void efx_ef10_free_piobufs(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),

	nic_data->n_piobufs = 0;

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {

		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);

	nic_data->n_piobufs = i;
		efx_ef10_free_piobufs(efx);

static int efx_ef10_link_piobufs(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;
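
			/* Worked example (editor's note, sizes
			 * hypothetical): with a 2048-byte PIO buffer
			 * split into 256-byte per-queue copy regions,
			 * the tenth queue from the end gets offset
			 * 9 * 256 == 2304, which maps to piobuf index
			 * 2304 / 2048 == 1 at offset 2304 % 2048 == 256
			 * within that buffer.
			 */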
			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {

			MCDI_SET_DWORD(inbuf,
				       LINK_PIOBUF_IN_PIOBUF_HANDLE,
				       nic_data->piobuf_handle[index]);
			MCDI_SET_DWORD(inbuf,
				       LINK_PIOBUF_IN_TXQ_INSTANCE,
			rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
					  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,

	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
		       nic_data->pio_write_vi_base + index);
	efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
		     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
	return n == 0 ? 0 : -ENOBUFS;

static int efx_ef10_link_piobufs(struct efx_nic *efx)

static void efx_ef10_free_piobufs(struct efx_nic *efx)

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);

static int efx_ef10_probe_pf(struct efx_nic *efx)
	return efx_ef10_probe(efx);

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,

	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen < sizeof(outbuf)) {

	*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");

	rc = efx_ef10_probe(efx);

	rc = efx_ef10_get_vf_index(efx);

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");

	efx_ef10_remove(efx);

static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;

	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);

		nic_data->n_piobufs = 0;

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -

		rc = efx_ef10_link_piobufs(efx);
			efx_ef10_free_piobufs(efx);

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

static int efx_ef10_init_nic(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		nic_data->must_check_datapath_caps = false;

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		nic_data->must_realloc_vis = false;

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
			rc = efx_ef10_link_piobufs(efx);
				efx_ef10_free_piobufs(efx);

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
	efx->rss_active = (rc == 0);

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);

static int efx_ef10_map_reset_flags(u32 *flags)
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;

	/* no invisible reset implemented */

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
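
/* Editor's note: as a concrete expansion of the macros above,
 * EF10_DMA_STAT(port_tx_bytes, TX_BYTES) produces the initialiser
 *
 *	[EF10_STAT_port_tx_bytes] =
 *		{ "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 *
 * i.e. a 64-bit DMA statistic located 8 * MC_CMD_MAC_TX_BYTES bytes
 * into the MAC stats DMA buffer (each MC statistic is one u64).
 */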
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
 * For a 10G/40G switchable port we do not expose these because they might
 * not include all the packets they should.
 * On 8000 series NICs these statistics are always provided.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * not be reported.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
		/* 8000 series have everything even at 40G */
		if (nic_data->datapath_caps2 &
		    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
			raw_mask |= HUNT_10G_ONLY_STAT_MASK;
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
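
		/* Editor's note: EF10_STAT_rx_unicast is the first
		 * vadaptor statistic in the enum (see the stat table
		 * above), so ~((1ULL << EF10_STAT_rx_unicast) - 1)
		 * enables every bit from there up to bit 63 of
		 * raw_mask[0], while raw_mask[1] enables the bits for
		 * the statistics that spill over into the second
		 * 64-bit word.
		 */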
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
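
	/* Worked example (editor's note): on a 32-bit build the two u64
	 * raw words are split into three unsigned longs, so e.g.
	 * raw_mask[0] == 0x0000000300000001ULL becomes
	 * mask[0] == 0x00000001 and mask[1] == 0x00000003, with mask[2]
	 * taken from the low dword of raw_mask[1].
	 */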
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	for_each_set_bit(index, mask, EF10_STAT_COUNT) {
		if (efx_ef10_stat_desc[index].name) {
			*full_stats++ = stats[index];

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
		/* Use vadaptor stats. */
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
		/* Use port stats. */
		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_port_rx_gtjumbo] +
			stats[EF10_STAT_port_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
		core_stats->rx_frame_errors =
			stats[EF10_STAT_port_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);

static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)

	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);

	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx,
				     &stats[EF10_STAT_port_rx_nodesc_drops]);
	stats[EF10_STAT_port_rx_good_bytes] =
		stats[EF10_STAT_port_rx_bytes] -
		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
	efx_update_sw_stats(efx, stats);
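
	/* Editor's note: port_rx_good_bytes is derived rather than
	 * DMA'd.  The MAC supplies RX_BYTES and RX_BAD_BYTES (the
	 * latter mapped to the invisible
	 * port_rx_bytes_minus_good_bytes statistic), so
	 * good bytes == rx_bytes - bad_bytes, and
	 * efx_update_diff_stat() only ever moves the stored bad-bytes
	 * value forwards, keeping it stable across partial updates.
	 */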
static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);

static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
	struct efx_buffer stats_buf;

	spin_unlock_bh(&efx->stats_lock);

	if (in_interrupt()) {
		/* If in atomic context, cannot update stats.  Just update the
		 * software stats and return so the caller can continue.
		 */
		spin_lock_bh(&efx->stats_lock);
		efx_update_sw_stats(efx, stats);

	efx_ef10_get_stat_mask(efx, mask);

	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
		spin_lock_bh(&efx->stats_lock);

	dma_stats = stats_buf.addr;
	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, 1);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
	spin_lock_bh(&efx->stats_lock);
		/* Expect ENOENT if DMA queues have not been set up */
		if (rc != -ENOENT || atomic_read(&efx->active_queues))
			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
					       sizeof(inbuf), NULL, 0, rc);

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {

	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, stats_buf.addr, false);

	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start) {

	efx_update_sw_stats(efx, stats);
	efx_nic_free_buffer(efx, &stats_buf);

static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
	if (efx_ef10_try_update_nic_stats_vf(efx))

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
	struct efx_nic *efx = channel->efx;
	unsigned int mode, usecs;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation_us) {
		usecs = channel->irq_moderation_us;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
		unsigned int ns = usecs * 1000;

		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);

		efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
				   inbuf, sizeof(inbuf), 0, NULL, 0);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,

static void efx_ef10_get_wol_vf(struct efx_nic *efx,
				struct ethtool_wolinfo *wol) {}

static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
	memset(&wol->sopass, 0, sizeof(wol->sopass));

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
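
	/* Editor's note, illustrating the swap described above: for a
	 * DMA address 0x0000001234567000, the high dword 0x00000012 is
	 * written first (to the 'low' register) and the low dword
	 * 0x34567000 second (to the 'high' doorbell register, whose
	 * write triggers the MC).  Because the buffer is aligned, the
	 * doorbell's least significant bits are 0 for real requests,
	 * which is why writing 1 there in efx_ef10_probe() serves as a
	 * special cancel message.
	 */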
2002 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2004 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2005 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2008 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2012 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2013 size_t offset, size_t outlen)
2015 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2016 const u8 *pdu = nic_data->mcdi_buf.addr;
2018 memcpy(outbuf, pdu + offset, outlen);
2021 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2023 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2025 /* All our allocations have been reset */
2026 efx_ef10_reset_mc_allocations(efx);
2028 /* The datapath firmware might have been changed */
2029 nic_data->must_check_datapath_caps = true;
2031 /* MAC statistics have been cleared on the NIC; clear the local
2032 * statistic that we update with efx_update_diff_stat().
2034 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2037 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2039 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2042 rc = efx_ef10_get_warm_boot_count(efx);
2044 /* The firmware is presumably in the process of
2045 * rebooting. However, we are supposed to report each
2046 * reboot just once, so we must only do that once we
2047 * can read and store the updated warm boot count.
2052 if (rc == nic_data->warm_boot_count)
2055 nic_data->warm_boot_count = rc;
2056 efx_ef10_mcdi_reboot_detected(efx);
2061 /* Handle an MSI interrupt
2063 * Handle an MSI hardware interrupt. This routine schedules event
2064 * queue processing. No interrupt acknowledgement cycle is necessary.
2065 * Also, we never need to check that the interrupt is for us, since
2066 * MSI interrupts cannot be shared.
2068 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2070 struct efx_msi_context *context = dev_id;
2071 struct efx_nic *efx = context->efx;
2073 netif_vdbg(efx, intr, efx->net_dev,
2074 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2076 if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
2077 /* Note test interrupts */
2078 if (context->index == efx->irq_level)
2079 efx->last_irq_cpu = raw_smp_processor_id();
2081 /* Schedule processing of the channel */
2082 efx_schedule_channel_irq(efx->channel[context->index]);
2088 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2090 struct efx_nic *efx = dev_id;
2091 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
2092 struct efx_channel *channel;
2096 /* Read the ISR which also ACKs the interrupts */
2097 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2098 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2103 if (likely(soft_enabled)) {
2104 /* Note test interrupts */
2105 if (queues & (1U << efx->irq_level))
2106 efx->last_irq_cpu = raw_smp_processor_id();
2108 efx_for_each_channel(channel, efx) {
2110 efx_schedule_channel_irq(channel);
2115 netif_vdbg(efx, intr, efx->net_dev,
2116 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2117 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2122 static int efx_ef10_irq_test_generate(struct efx_nic *efx)
2124 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2126 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2130 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2132 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
2133 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
2134 inbuf, sizeof(inbuf), NULL, 0, NULL);
2137 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2139 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2140 (tx_queue->ptr_mask + 1) *
2141 sizeof(efx_qword_t),
2145 /* This writes to the TX_DESC_WPTR and also pushes data */
2146 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2147 const efx_qword_t *txd)
2149 unsigned int write_ptr;
2152 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2153 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2154 reg.qword[0] = *txd;
2155 efx_writeo_page(tx_queue->efx, &reg,
2156 ER_DZ_TX_DESC_UPD, tx_queue->queue);
2159 /* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2161 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2162 struct sk_buff *skb,
2165 struct efx_tx_buffer *buffer;
2173 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
2175 mss = skb_shinfo(skb)->gso_size;
2177 if (unlikely(mss < 4)) {
2178 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2183 if (ip->version == 4) {
2184 /* Modify IPv4 header if needed. */
2187 ipv4_id = ntohs(ip->id);
2189 /* Modify IPv6 header if needed. */
2190 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2192 ipv6->payload_len = 0;
2197 seqnum = ntohl(tcp->seq);
2199 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2201 buffer->flags = EFX_TX_BUF_OPTION;
2203 buffer->unmap_len = 0;
2204 EFX_POPULATE_QWORD_5(buffer->option,
2205 ESF_DZ_TX_DESC_IS_OPT, 1,
2206 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2207 ESF_DZ_TX_TSO_OPTION_TYPE,
2208 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2209 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2210 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2212 ++tx_queue->insert_count;
2214 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2216 buffer->flags = EFX_TX_BUF_OPTION;
2218 buffer->unmap_len = 0;
2219 EFX_POPULATE_QWORD_4(buffer->option,
2220 ESF_DZ_TX_DESC_IS_OPT, 1,
2221 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2222 ESF_DZ_TX_TSO_OPTION_TYPE,
2223 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2224 ESF_DZ_TX_TSO_TCP_MSS, mss
2226 ++tx_queue->insert_count;
2231 static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2233 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2234 u32 tso_versions = 0;
2236 if (nic_data->datapath_caps &
2237 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2238 tso_versions |= BIT(1);
2239 if (nic_data->datapath_caps2 &
2240 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2241 tso_versions |= BIT(2);
2242 return tso_versions;
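/* For example, a NIC advertising both capability bits above would
 * return BIT(1) | BIT(2) == 0x6, i.e. both the TSO v1 and TSO v2
 * engines are available.
 */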
2245 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2247 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2249 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2250 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2251 struct efx_channel *channel = tx_queue->channel;
2252 struct efx_nic *efx = tx_queue->efx;
2253 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2254 bool tso_v2 = false;
2256 dma_addr_t dma_addr;
2260 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
2262 /* TSOv2 is a limited resource that can only be configured on a limited
2263 * number of queues. TSO without checksum offload is not really a thing,
2264 * so we only enable it for those queues.
2266 if (csum_offload && (nic_data->datapath_caps2 &
2267 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
2269 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2273 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2274 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2275 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2276 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
2277 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
2278 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
2280 dma_addr = tx_queue->txd.buf.dma_addr;
2282 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2283 tx_queue->queue, entries, (u64)dma_addr);
2285 for (i = 0; i < entries; ++i) {
2286 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2287 dma_addr += EFX_BUF_SIZE;
2290 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2293 MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
2294 /* This flag was removed from mcdi_pcol.h for
2295 * the non-_EXT version of INIT_TXQ. However,
2296 * firmware still honours it.
2298 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2299 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
2300 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
2302 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2304 if (rc == -ENOSPC && tso_v2) {
2305 /* Retry without TSOv2 if we're short on contexts. */
2307 netif_warn(efx, probe, efx->net_dev,
2308 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2310 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2311 MC_CMD_INIT_TXQ_EXT_IN_LEN,
2317 /* A previous user of this TX queue might have set us up the
2318 * bomb by writing a descriptor to the TX push collector but
2319 * not the doorbell. (Each collector belongs to a port, not a
2320 * queue or function, so cannot easily be reset.) We must
2321 * attempt to push a no-op descriptor in its place.
2323 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2324 tx_queue->insert_count = 1;
2325 txd = efx_tx_desc(tx_queue, 0);
2326 EFX_POPULATE_QWORD_4(*txd,
2327 ESF_DZ_TX_DESC_IS_OPT, true,
2328 ESF_DZ_TX_OPTION_TYPE,
2329 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2330 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2331 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
2332 tx_queue->write_count = 1;
2335 tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2336 tx_queue->tso_version = 2;
2337 } else if (nic_data->datapath_caps &
2338 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
2339 tx_queue->tso_version = 1;
2343 efx_ef10_push_tx_desc(tx_queue, txd);
2348 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2352 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2354 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
2355 MCDI_DECLARE_BUF_ERR(outbuf);
2356 struct efx_nic *efx = tx_queue->efx;
2360 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2363 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
2364 outbuf, sizeof(outbuf), &outlen);
2366 if (rc && rc != -EALREADY)
2372 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2373 outbuf, outlen, rc);
2376 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2378 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2381 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2382 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2384 unsigned int write_ptr;
2387 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2388 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2389 efx_writed_page(tx_queue->efx, &reg,
2390 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2393 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2395 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2396 dma_addr_t dma_addr, unsigned int len)
2398 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2399 /* If we need to break across multiple descriptors we should
2400 * stop at a page boundary. This assumes the length limit is
2401 * greater than the page size.
2403 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2405 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2406 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
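/* Worked example of the clamp above (assuming EFX_PAGE_SIZE == 4096):
 * for dma_addr = 0x12345678 and len > 0x3fff,
 *	end = 0x12345678 + 0x3fff = 0x12349677
 *	len = (end & ~0xfffUL) - dma_addr
 *	    = 0x12349000 - 0x12345678 = 0x3988
 * so this descriptor ends exactly on a page boundary and the caller
 * emits the remainder in further descriptors.
 */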
2412 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2414 unsigned int old_write_count = tx_queue->write_count;
2415 struct efx_tx_buffer *buffer;
2416 unsigned int write_ptr;
2419 tx_queue->xmit_more_available = false;
2420 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2424 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2425 buffer = &tx_queue->buffer[write_ptr];
2426 txd = efx_tx_desc(tx_queue, write_ptr);
2427 ++tx_queue->write_count;
2429 /* Create TX descriptor ring entry */
2430 if (buffer->flags & EFX_TX_BUF_OPTION) {
2431 *txd = buffer->option;
2432 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2433 /* PIO descriptor */
2434 tx_queue->packet_write_count = tx_queue->write_count;
2436 tx_queue->packet_write_count = tx_queue->write_count;
2437 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2438 EFX_POPULATE_QWORD_3(
2441 buffer->flags & EFX_TX_BUF_CONT,
2442 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2443 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2445 } while (tx_queue->write_count != tx_queue->insert_count);
2447 wmb(); /* Ensure descriptors are written before they are fetched */
2449 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2450 txd = efx_tx_desc(tx_queue,
2451 old_write_count & tx_queue->ptr_mask);
2452 efx_ef10_push_tx_desc(tx_queue, txd);
2455 efx_ef10_notify_tx_desc(tx_queue);
2459 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2460 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2461 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2462 1 << RSS_MODE_HASH_DST_PORT_LBN)
2463 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2464 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2465 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2466 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2467 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2468 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2469 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2470 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2471 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2472 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
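/* Each *_RSS_MODE field above is (as we read the MCDI definitions) a
 * 4-bit sub-field of the flags word: RSS_MODE_HASH_ADDRS selects a
 * 2-tuple hash over source/destination address, and
 * RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS a 4-tuple hash that adds
 * the ports. The default is therefore 4-tuple for TCP and 2-tuple for
 * UDP and other IP traffic.
 */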
2474 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2476 /* Firmware had a bug (sfc bug 61952) where it would not actually
2477 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2478 * This meant that it would always contain whatever was previously
2479 * in the MCDI buffer. Fortunately, all firmware versions with
2480 * this bug have the same default flags value for a newly-allocated
2481 * RSS context, and the only time we want to get the flags is just
2482 * after allocating. Moreover, the response has a 32-bit hole
2483 * where the context ID would be in the request, so we can use an
2484 * overlength buffer in the request and pre-fill the flags field
2485 * with what we believe the default to be. Thus if the firmware
2486 * has the bug, it will leave our pre-filled value in the flags
2487 * field of the response, and we will get the right answer.
2489 * However, this does mean that this function should NOT be used if
2490 * the RSS context flags might not be their defaults - it is ONLY
2491 * reliably correct for a newly-allocated RSS context.
2493 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2494 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2498 /* Check we have a hole for the context ID */
2499 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2500 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2501 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2502 RSS_CONTEXT_FLAGS_DEFAULT);
2503 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2504 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2506 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2509 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
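/* Note how the workaround above hangs together: inbuf is declared
 * with the *output* length, i.e. 4 bytes of context ID followed by a
 * 4-byte hole that lines up with the response's flags field. Buggy
 * firmware that never writes the flags therefore echoes back the
 * RSS_CONTEXT_FLAGS_DEFAULT value we pre-filled.
 */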
2514 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2515 * If we fail, we just leave the RSS context at its default hash settings,
2516 * which is safe but may slightly reduce performance.
2517 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2518 * just need to set the UDP ports flags (for both IP versions).
2520 static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context)
2522 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2525 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2527 if (efx_ef10_get_rss_flags(efx, context, &flags) != 0)
2529 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context);
2530 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2531 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2532 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
2533 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2535 /* Succeeded, so UDP 4-tuple is now enabled */
2536 efx->rx_hash_udp_4tuple = true;
2539 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
2540 bool exclusive, unsigned *context_size)
2542 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2543 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
2544 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2547 u32 alloc_type = exclusive ?
2548 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2549 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2550 unsigned rss_spread = exclusive ?
2552 min(rounddown_pow_of_two(efx->rss_spread),
2553 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2555 if (!exclusive && rss_spread == 1) {
2556 *context = EFX_EF10_RSS_CONTEXT_INVALID;
2562 if (nic_data->datapath_caps &
2563 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2566 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
2567 nic_data->vport_id);
2568 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2569 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
2571 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2572 outbuf, sizeof(outbuf), &outlen);
2576 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2579 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
2582 *context_size = rss_spread;
2584 if (nic_data->datapath_caps &
2585 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
2586 efx_ef10_set_rss_flags(efx, *context);
2591 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
2593 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
2596 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2599 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
2604 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
2605 const u32 *rx_indir_table, const u8 *key)
2607 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2608 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2611 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2613 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
2614 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2616 /* This iterates over the length of efx->rx_indir_table, but copies
2617 * bytes from rx_indir_table. That's because the latter is a pointer
2618 * rather than an array, but should have the same length.
2619 * The efx->rx_hash_key loop below is similar.
2621 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
2623 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2624 (u8) rx_indir_table[i];
2626 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2627 sizeof(tablebuf), NULL, 0, NULL);
2631 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2633 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2634 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2635 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2636 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
2638 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2639 sizeof(keybuf), NULL, 0, NULL);
2642 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2644 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2646 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2647 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
2648 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2651 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2652 unsigned *context_size)
2654 u32 new_rx_rss_context;
2655 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2656 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2657 false, context_size);
2662 nic_data->rx_rss_context = new_rx_rss_context;
2663 nic_data->rx_rss_context_exclusive = false;
2664 efx_set_default_rx_indir_table(efx);
2668 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2669 const u32 *rx_indir_table,
2672 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2674 u32 new_rx_rss_context;
2676 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
2677 !nic_data->rx_rss_context_exclusive) {
2678 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2680 if (rc == -EOPNOTSUPP)
2685 new_rx_rss_context = nic_data->rx_rss_context;
2688 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2689 rx_indir_table, key);
2693 if (nic_data->rx_rss_context != new_rx_rss_context)
2694 efx_ef10_rx_free_indir_table(efx);
2695 nic_data->rx_rss_context = new_rx_rss_context;
2696 nic_data->rx_rss_context_exclusive = true;
2697 if (rx_indir_table != efx->rx_indir_table)
2698 memcpy(efx->rx_indir_table, rx_indir_table,
2699 sizeof(efx->rx_indir_table));
2700 if (key != efx->rx_hash_key)
2701 memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size);
2706 if (new_rx_rss_context != nic_data->rx_rss_context)
2707 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2709 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2713 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2715 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2716 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2717 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2718 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2722 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2723 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2725 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
2728 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
2729 nic_data->rx_rss_context);
2730 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
2731 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2732 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2733 tablebuf, sizeof(tablebuf), &outlen);
2737 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2740 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
2741 efx->rx_indir_table[i] = MCDI_PTR(tablebuf,
2742 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2744 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
2745 nic_data->rx_rss_context);
2746 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2747 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2748 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2749 keybuf, sizeof(keybuf), &outlen);
2753 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2756 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2757 efx->rx_hash_key[i] = MCDI_PTR(
2758 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2763 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2764 const u32 *rx_indir_table,
2769 if (efx->rss_spread == 1)
2773 key = efx->rx_hash_key;
2775 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
2777 if (rc == -ENOBUFS && !user) {
2778 unsigned context_size;
2779 bool mismatch = false;
2782 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2784 mismatch = rx_indir_table[i] !=
2785 ethtool_rxfh_indir_default(i, efx->rss_spread);
2787 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2789 if (context_size != efx->rss_spread)
2790 netif_warn(efx, probe, efx->net_dev,
2791 "Could not allocate an exclusive RSS"
2792 " context; allocated a shared one of"
2794 " Wanted %u, got %u.\n",
2795 efx->rss_spread, context_size);
2797 netif_warn(efx, probe, efx->net_dev,
2798 "Could not allocate an exclusive RSS"
2799 " context; allocated a shared one but"
2800 " could not apply custom"
2803 netif_info(efx, probe, efx->net_dev,
2804 "Could not allocate an exclusive RSS"
2805 " context; allocated a shared one.\n");
2811 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2812 const u32 *rx_indir_table
2813 __attribute__ ((unused)),
2815 __attribute__ ((unused)))
2817 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2821 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2823 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
2826 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
2828 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
2829 (rx_queue->ptr_mask + 1) *
2830 sizeof(efx_qword_t),
2834 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
2836 MCDI_DECLARE_BUF(inbuf,
2837 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2839 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2840 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
2841 struct efx_nic *efx = rx_queue->efx;
2842 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2844 dma_addr_t dma_addr;
2847 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
2849 rx_queue->scatter_n = 0;
2850 rx_queue->scatter_len = 0;
2852 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
2853 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
2854 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
2855 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
2856 efx_rx_queue_index(rx_queue));
2857 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
2858 INIT_RXQ_IN_FLAG_PREFIX, 1,
2859 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
2860 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
2861 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
2863 dma_addr = rx_queue->rxd.buf.dma_addr;
2865 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
2866 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
2868 for (i = 0; i < entries; ++i) {
2869 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
2870 dma_addr += EFX_BUF_SIZE;
2873 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
2875 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
2878 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
2879 efx_rx_queue_index(rx_queue));
2882 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
2884 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
2885 MCDI_DECLARE_BUF_ERR(outbuf);
2886 struct efx_nic *efx = rx_queue->efx;
2890 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
2891 efx_rx_queue_index(rx_queue));
2893 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
2894 outbuf, sizeof(outbuf), &outlen);
2896 if (rc && rc != -EALREADY)
2902 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
2903 outbuf, outlen, rc);
2906 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
2908 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
2911 /* This creates an entry in the RX descriptor queue */
2913 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2915 struct efx_rx_buffer *rx_buf;
2918 rxd = efx_rx_desc(rx_queue, index);
2919 rx_buf = efx_rx_buffer(rx_queue, index);
2920 EFX_POPULATE_QWORD_2(*rxd,
2921 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2922 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2925 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2927 struct efx_nic *efx = rx_queue->efx;
2928 unsigned int write_count;
2931 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2932 write_count = rx_queue->added_count & ~7;
2933 if (rx_queue->notified_count == write_count)
2937 efx_ef10_build_rx_desc(
2939 rx_queue->notified_count & rx_queue->ptr_mask);
2940 while (++rx_queue->notified_count != write_count);
2943 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2944 write_count & rx_queue->ptr_mask);
2945 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
2946 efx_rx_queue_index(rx_queue));
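/* Example of the multiple-of-8 rule above: with added_count == 29 and
 * notified_count == 24, write_count == (29 & ~7) == 24 and we return
 * without ringing the doorbell; the last five descriptors are only
 * pushed once added_count reaches 32.
 */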
2949 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2951 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2953 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2954 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2957 EFX_POPULATE_QWORD_2(event,
2958 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2959 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2961 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2963 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2964 * already swapped the data to little-endian order.
2966 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2967 sizeof(efx_qword_t));
2969 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2970 inbuf, sizeof(inbuf), 0,
2971 efx_ef10_rx_defer_refill_complete, 0);
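/* The refill is deliberately bounced through the event queue: the
 * DRIVER_EVENT MCDI command asks the MC to deliver our EFX_EF10_REFILL
 * event on this channel, and the handler in
 * efx_ef10_handle_driver_generated_event() below then refills the
 * queue from the normal event-processing (NAPI) context.
 */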
2975 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2976 int rc, efx_dword_t *outbuf,
2977 size_t outlen_actual)
2982 static int efx_ef10_ev_probe(struct efx_channel *channel)
2984 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
2985 (channel->eventq_mask + 1) *
2986 sizeof(efx_qword_t),
2990 static void efx_ef10_ev_fini(struct efx_channel *channel)
2992 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
2993 MCDI_DECLARE_BUF_ERR(outbuf);
2994 struct efx_nic *efx = channel->efx;
2998 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3000 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3001 outbuf, sizeof(outbuf), &outlen);
3003 if (rc && rc != -EALREADY)
3009 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3010 outbuf, outlen, rc);
3013 static int efx_ef10_ev_init(struct efx_channel *channel)
3015 MCDI_DECLARE_BUF(inbuf,
3016 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3018 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
3019 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3020 struct efx_nic *efx = channel->efx;
3021 struct efx_ef10_nic_data *nic_data;
3022 size_t inlen, outlen;
3023 unsigned int enabled, implemented;
3024 dma_addr_t dma_addr;
3028 nic_data = efx->nic_data;
3030 /* Fill event queue with all ones (i.e. empty events) */
3031 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3033 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3034 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3035 /* INIT_EVQ expects index in vector table, not absolute */
3036 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
3037 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3038 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3039 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3040 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3041 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3042 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3043 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3045 if (nic_data->datapath_caps2 &
3046 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3047 /* Use the new generic approach to specifying event queue
3048 * configuration, requesting lower latency or higher throughput.
3049 * The options that actually get used appear in the output.
3051 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3052 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3053 INIT_EVQ_V2_IN_FLAG_TYPE,
3054 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3056 bool cut_thru = !(nic_data->datapath_caps &
3057 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3059 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3060 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3061 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3062 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3063 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3066 dma_addr = channel->eventq.buf.dma_addr;
3067 for (i = 0; i < entries; ++i) {
3068 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3069 dma_addr += EFX_BUF_SIZE;
3072 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3074 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3075 outbuf, sizeof(outbuf), &outlen);
3077 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3078 netif_dbg(efx, drv, efx->net_dev,
3079 "Channel %d using event queue flags %08x\n",
3081 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3083 /* IRQ return is ignored */
3084 if (channel->channel || rc)
3087 /* Successfully created event queue on channel 0 */
3088 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
3089 if (rc == -ENOSYS) {
3090 /* GET_WORKAROUNDS predates this workaround, so firmware old
3091 * enough to lack GET_WORKAROUNDS must also lack the workaround.
3092 */
3093 nic_data->workaround_26807 = false;
3098 nic_data->workaround_26807 =
3099 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
3101 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3102 !nic_data->workaround_26807) {
3105 rc = efx_mcdi_set_workaround(efx,
3106 MC_CMD_WORKAROUND_BUG26807,
3111 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3112 netif_info(efx, drv, efx->net_dev,
3113 "other functions on NIC have been reset\n");
3115 /* With MCFW v4.6.x and earlier, the
3116 * boot count will have incremented,
3117 * so re-read the warm_boot_count
3118 * value now to ensure this function
3119 * doesn't think it has changed next
3120 * time it checks.
3121 */
3122 rc = efx_ef10_get_warm_boot_count(efx);
3124 nic_data->warm_boot_count = rc;
3128 nic_data->workaround_26807 = true;
3129 } else if (rc == -EPERM) {
3139 efx_ef10_ev_fini(channel);
3143 static void efx_ef10_ev_remove(struct efx_channel *channel)
3145 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3148 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3149 unsigned int rx_queue_label)
3151 struct efx_nic *efx = rx_queue->efx;
3153 netif_info(efx, hw, efx->net_dev,
3154 "rx event arrived on queue %d labeled as queue %u\n",
3155 efx_rx_queue_index(rx_queue), rx_queue_label);
3157 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3161 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3162 unsigned int actual, unsigned int expected)
3164 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3165 struct efx_nic *efx = rx_queue->efx;
3167 netif_info(efx, hw, efx->net_dev,
3168 "dropped %d events (index=%d expected=%d)\n",
3169 dropped, actual, expected);
3171 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3174 /* partially received RX was aborted. clean up. */
3175 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3177 unsigned int rx_desc_ptr;
3179 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3180 "scattered RX aborted (dropping %u buffers)\n",
3181 rx_queue->scatter_n);
3183 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3185 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3186 0, EFX_RX_PKT_DISCARD);
3188 rx_queue->removed_count += rx_queue->scatter_n;
3189 rx_queue->scatter_n = 0;
3190 rx_queue->scatter_len = 0;
3191 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3194 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3195 unsigned int n_packets,
3196 unsigned int rx_encap_hdr,
3197 unsigned int rx_l3_class,
3198 unsigned int rx_l4_class,
3199 const efx_qword_t *event)
3201 struct efx_nic *efx = channel->efx;
3203 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
3204 if (!efx->loopback_selftest)
3205 channel->n_rx_eth_crc_err += n_packets;
3206 return EFX_RX_PKT_DISCARD;
3208 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3209 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3210 rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3211 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3212 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3213 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3214 netdev_WARN(efx->net_dev,
3215 "invalid class for RX_IPCKSUM_ERR: event="
3217 EFX_QWORD_VAL(*event));
3218 if (!efx->loopback_selftest)
3220 &channel->n_rx_outer_ip_hdr_chksum_err :
3221 &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3224 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3225 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3226 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3227 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3228 (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
3229 rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
3230 netdev_WARN(efx->net_dev,
3231 "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3233 EFX_QWORD_VAL(*event));
3234 if (!efx->loopback_selftest)
3236 &channel->n_rx_outer_tcp_udp_chksum_err :
3237 &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3240 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3241 if (unlikely(!rx_encap_hdr))
3242 netdev_WARN(efx->net_dev,
3243 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3245 EFX_QWORD_VAL(*event));
3246 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3247 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3248 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3249 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3250 netdev_WARN(efx->net_dev,
3251 "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3253 EFX_QWORD_VAL(*event));
3254 if (!efx->loopback_selftest)
3255 channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3258 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3259 if (unlikely(!rx_encap_hdr))
3260 netdev_WARN(efx->net_dev,
3261 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3263 EFX_QWORD_VAL(*event));
3264 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3265 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
3266 (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
3267 rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
3268 netdev_WARN(efx->net_dev,
3269 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3271 EFX_QWORD_VAL(*event));
3272 if (!efx->loopback_selftest)
3273 channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3277 WARN_ON(1); /* No error bits were recognised */
3281 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3282 const efx_qword_t *event)
3284 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3285 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
3286 unsigned int n_descs, n_packets, i;
3287 struct efx_nic *efx = channel->efx;
3288 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3289 struct efx_rx_queue *rx_queue;
3294 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
3297 /* Basic packet information */
3298 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3299 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3300 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
3301 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
3302 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
3303 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
3305 nic_data->datapath_caps &
3306 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3307 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3308 ESE_EZ_ENCAP_HDR_NONE;
3310 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3311 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3313 EFX_QWORD_VAL(*event));
3315 rx_queue = efx_channel_get_rx_queue(channel);
3317 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3318 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3320 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3321 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
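/* next_ptr_lbits holds only the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH
 * bits of the hardware descriptor pointer, hence the mask above.
 * E.g. with a 4-bit field, removed_count ending in 14 and
 * next_ptr_lbits == 2 gives n_descs = (2 - 14) & 15 == 4.
 */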
3323 if (n_descs != rx_queue->scatter_n + 1) {
3324 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3326 /* detect rx abort */
3327 if (unlikely(n_descs == rx_queue->scatter_n)) {
3328 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3329 netdev_WARN(efx->net_dev,
3330 "invalid RX abort: scatter_n=%u event="
3332 rx_queue->scatter_n,
3333 EFX_QWORD_VAL(*event));
3334 efx_ef10_handle_rx_abort(rx_queue);
3338 /* Check that RX completion merging is valid, i.e.
3339 * the current firmware supports it and this is a
3340 * non-scattered packet.
3342 if (!(nic_data->datapath_caps &
3343 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3344 rx_queue->scatter_n != 0 || rx_cont) {
3345 efx_ef10_handle_rx_bad_lbits(
3346 rx_queue, next_ptr_lbits,
3347 (rx_queue->removed_count +
3348 rx_queue->scatter_n + 1) &
3349 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3353 /* Merged completion for multiple non-scattered packets */
3354 rx_queue->scatter_n = 1;
3355 rx_queue->scatter_len = 0;
3356 n_packets = n_descs;
3357 ++channel->n_rx_merge_events;
3358 channel->n_rx_merge_packets += n_packets;
3359 flags |= EFX_RX_PKT_PREFIX_LEN;
3361 ++rx_queue->scatter_n;
3362 rx_queue->scatter_len += rx_bytes;
3368 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3369 ESF_DZ_RX_IPCKSUM_ERR, 1,
3370 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3371 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3372 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3373 EFX_AND_QWORD(errors, *event, errors);
3374 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3375 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
3377 rx_l3_class, rx_l4_class,
3380 bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
3381 rx_l4_class == ESE_DZ_L4_CLASS_UDP;
3383 switch (rx_encap_hdr) {
3384 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3385 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3387 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3389 case ESE_EZ_ENCAP_HDR_GRE:
3390 case ESE_EZ_ENCAP_HDR_NONE:
3392 flags |= EFX_RX_PKT_CSUMMED;
3395 netdev_WARN(efx->net_dev,
3396 "unknown encapsulation type: event="
3398 EFX_QWORD_VAL(*event));
3402 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
3403 flags |= EFX_RX_PKT_TCP;
3405 channel->irq_mod_score += 2 * n_packets;
3407 /* Handle received packet(s) */
3408 for (i = 0; i < n_packets; i++) {
3409 efx_rx_packet(rx_queue,
3410 rx_queue->removed_count & rx_queue->ptr_mask,
3411 rx_queue->scatter_n, rx_queue->scatter_len,
3413 rx_queue->removed_count += rx_queue->scatter_n;
3416 rx_queue->scatter_n = 0;
3417 rx_queue->scatter_len = 0;
3423 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3425 struct efx_nic *efx = channel->efx;
3426 struct efx_tx_queue *tx_queue;
3427 unsigned int tx_ev_desc_ptr;
3428 unsigned int tx_ev_q_label;
3431 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
3434 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
3437 /* Transmit completion */
3438 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3439 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3440 tx_queue = efx_channel_get_tx_queue(channel,
3441 tx_ev_q_label % EFX_TXQ_TYPES);
3442 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
3443 tx_queue->ptr_mask);
3444 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3450 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3452 struct efx_nic *efx = channel->efx;
3455 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3458 case ESE_DZ_DRV_TIMER_EV:
3459 case ESE_DZ_DRV_WAKE_UP_EV:
3461 case ESE_DZ_DRV_START_UP_EV:
3462 /* event queue init complete. ok. */
3465 netif_err(efx, hw, efx->net_dev,
3466 "channel %d unknown driver event type %d"
3467 " (data " EFX_QWORD_FMT ")\n",
3468 channel->channel, subcode,
3469 EFX_QWORD_VAL(*event));
3474 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3477 struct efx_nic *efx = channel->efx;
3480 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3484 channel->event_test_cpu = raw_smp_processor_id();
3486 case EFX_EF10_REFILL:
3487 /* The queue must be empty, so we won't receive any rx
3488 * events, so efx_process_channel() won't refill the
3489 * queue. Refill it here
3491 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3494 netif_err(efx, hw, efx->net_dev,
3495 "channel %d unknown driver event type %u"
3496 " (data " EFX_QWORD_FMT ")\n",
3497 channel->channel, (unsigned) subcode,
3498 EFX_QWORD_VAL(*event));
3502 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3504 struct efx_nic *efx = channel->efx;
3505 efx_qword_t event, *p_event;
3506 unsigned int read_ptr;
3514 read_ptr = channel->eventq_read_ptr;
3517 p_event = efx_event(channel, read_ptr);
3520 if (!efx_event_present(&event))
3523 EFX_SET_QWORD(*p_event);
3527 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3529 netif_vdbg(efx, drv, efx->net_dev,
3530 "processing event on %d " EFX_QWORD_FMT "\n",
3531 channel->channel, EFX_QWORD_VAL(event));
3534 case ESE_DZ_EV_CODE_MCDI_EV:
3535 efx_mcdi_process_event(channel, &event);
3537 case ESE_DZ_EV_CODE_RX_EV:
3538 spent += efx_ef10_handle_rx_event(channel, &event);
3539 if (spent >= quota) {
3540 /* XXX can we split a merged event to
3541 * avoid going over-quota?
3547 case ESE_DZ_EV_CODE_TX_EV:
3548 tx_descs += efx_ef10_handle_tx_event(channel, &event);
3549 if (tx_descs > efx->txq_entries) {
3552 } else if (++spent == quota) {
3556 case ESE_DZ_EV_CODE_DRIVER_EV:
3557 efx_ef10_handle_driver_event(channel, &event);
3558 if (++spent == quota)
3561 case EFX_EF10_DRVGEN_EV:
3562 efx_ef10_handle_driver_generated_event(channel, &event);
3565 netif_err(efx, hw, efx->net_dev,
3566 "channel %d unknown event type %d"
3567 " (data " EFX_QWORD_FMT ")\n",
3568 channel->channel, ev_code,
3569 EFX_QWORD_VAL(event));
3574 channel->eventq_read_ptr = read_ptr;
3578 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3580 struct efx_nic *efx = channel->efx;
3583 if (EFX_EF10_WORKAROUND_35388(efx)) {
3584 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3585 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3586 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3587 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3589 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3590 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3591 ERF_DD_EVQ_IND_RPTR,
3592 (channel->eventq_read_ptr &
3593 channel->eventq_mask) >>
3594 ERF_DD_EVQ_IND_RPTR_WIDTH);
3595 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3597 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3598 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3599 ERF_DD_EVQ_IND_RPTR,
3600 channel->eventq_read_ptr &
3601 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3602 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3605 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3606 channel->eventq_read_ptr &
3607 channel->eventq_mask);
3608 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3612 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3614 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3615 struct efx_nic *efx = channel->efx;
3619 EFX_POPULATE_QWORD_2(event,
3620 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3621 ESF_DZ_EV_DATA, EFX_EF10_TEST);
3623 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3625 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3626 * already swapped the data to little-endian order.
3628 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3629 sizeof(efx_qword_t));
3631 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3640 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3643 void efx_ef10_handle_drain_event(struct efx_nic *efx)
3645 if (atomic_dec_and_test(&efx->active_queues))
3646 wake_up(&efx->flush_wq);
3648 WARN_ON(atomic_read(&efx->active_queues) < 0);
3651 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3653 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3654 struct efx_channel *channel;
3655 struct efx_tx_queue *tx_queue;
3656 struct efx_rx_queue *rx_queue;
3659 /* If the MC has just rebooted, the TX/RX queues will have already been
3660 * torn down, but efx->active_queues needs to be set to zero.
3662 if (nic_data->must_realloc_vis) {
3663 atomic_set(&efx->active_queues, 0);
3667 /* Do not attempt to write to the NIC during EEH recovery */
3668 if (efx->state != STATE_RECOVERY) {
3669 efx_for_each_channel(channel, efx) {
3670 efx_for_each_channel_rx_queue(rx_queue, channel)
3671 efx_ef10_rx_fini(rx_queue);
3672 efx_for_each_channel_tx_queue(tx_queue, channel)
3673 efx_ef10_tx_fini(tx_queue);
3676 wait_event_timeout(efx->flush_wq,
3677 atomic_read(&efx->active_queues) == 0,
3678 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3679 pending = atomic_read(&efx->active_queues);
3681 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3690 static void efx_ef10_prepare_flr(struct efx_nic *efx)
3692 atomic_set(&efx->active_queues, 0);
3695 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
3696 const struct efx_filter_spec *right)
3698 if ((left->match_flags ^ right->match_flags) |
3699 ((left->flags ^ right->flags) &
3700 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3703 return memcmp(&left->outer_vid, &right->outer_vid,
3704 sizeof(struct efx_filter_spec) -
3705 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3708 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
3710 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3711 return jhash2((const u32 *)&spec->outer_vid,
3712 (sizeof(struct efx_filter_spec) -
3713 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3715 /* XXX should we randomise the initval? */
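/* Both this hash and efx_ef10_filter_equal() above rely on the layout
 * of struct efx_filter_spec: all of the match fields live at or after
 * outer_vid, so hashing or comparing from that offset to the end of
 * the struct covers exactly the match tuple. Insertion then probes
 * (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1) for up to
 * EFX_EF10_FILTER_SEARCH_LIMIT slots (see efx_ef10_filter_insert()).
 */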
3718 /* Decide whether a filter should be exclusive or else should allow
3719 * delivery to additional recipients. Currently we decide that
3720 * filters for specific local unicast MAC and IP addresses are
3721 * exclusive.
3722 */
3723 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
3725 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
3726 !is_multicast_ether_addr(spec->loc_mac))
3729 if ((spec->match_flags &
3730 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
3731 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
3732 if (spec->ether_type == htons(ETH_P_IP) &&
3733 !ipv4_is_multicast(spec->loc_host[0]))
3735 if (spec->ether_type == htons(ETH_P_IPV6) &&
3736 ((const u8 *)spec->loc_host)[0] != 0xff)
3743 static struct efx_filter_spec *
3744 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
3745 unsigned int filter_idx)
3747 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
3748 ~EFX_EF10_FILTER_FLAGS);
3752 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
3753 unsigned int filter_idx)
3755 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
3759 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
3760 unsigned int filter_idx,
3761 const struct efx_filter_spec *spec,
3764 table->entry[filter_idx].spec = (unsigned long)spec | flags;
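/* The helpers above implement a tagged pointer. Since a kmalloc()ed
 * spec is at least 4-byte aligned, the bottom two bits of the pointer
 * are free to carry EFX_EF10_FILTER_FLAG_BUSY and
 * EFX_EF10_FILTER_FLAG_AUTO_OLD. A sketch of the round trip:
 *
 *	entry->spec = (unsigned long)spec | EFX_EF10_FILTER_FLAG_BUSY;
 *	spec = (struct efx_filter_spec *)
 *		(entry->spec & ~EFX_EF10_FILTER_FLAGS);
 */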
3768 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
3769 const struct efx_filter_spec *spec,
3772 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
3773 u32 match_fields = 0, uc_match, mc_match;
3775 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3776 efx_ef10_filter_is_exclusive(spec) ?
3777 MC_CMD_FILTER_OP_IN_OP_INSERT :
3778 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
3780 /* Convert match flags and values. Unlike almost
3781 * everything else in MCDI, these fields are in
3782 * network byte order.
3784 #define COPY_VALUE(value, mcdi_field) \
3787 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3788 mcdi_field ## _LBN; \
3790 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
3792 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
3793 &value, sizeof(value)); \
3795 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
3796 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
3797 COPY_VALUE(spec->gen_field, mcdi_field); \
3799 /* Handle encap filters first. They will always be mismatch
3800 * (unknown UC or MC) filters
3803 /* ether_type and outer_ip_proto need to be variables
3804 * because COPY_VALUE wants to memcpy them
3807 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
3808 ETH_P_IPV6 : ETH_P_IP);
3809 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
3812 switch (encap_type & EFX_ENCAP_TYPES_MASK) {
3813 case EFX_ENCAP_TYPE_VXLAN:
3814 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
3816 case EFX_ENCAP_TYPE_GENEVE:
3817 COPY_VALUE(ether_type, ETHER_TYPE);
3818 outer_ip_proto = IPPROTO_UDP;
3819 COPY_VALUE(outer_ip_proto, IP_PROTO);
3820 /* We always need to set the type field, even
3821 * though we're not matching on the TNI.
3823 MCDI_POPULATE_DWORD_1(inbuf,
3824 FILTER_OP_EXT_IN_VNI_OR_VSID,
3825 FILTER_OP_EXT_IN_VNI_TYPE,
3828 case EFX_ENCAP_TYPE_NVGRE:
3829 COPY_VALUE(ether_type, ETHER_TYPE);
3830 outer_ip_proto = IPPROTO_GRE;
3831 COPY_VALUE(outer_ip_proto, IP_PROTO);
3837 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
3838 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
3840 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
3841 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
3844 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
3846 is_multicast_ether_addr(spec->loc_mac) ?
3849 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
3850 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
3851 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
3852 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
3853 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
3854 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
3855 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
3856 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
3857 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
3858 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
3861 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
3865 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
3866 const struct efx_filter_spec *spec,
3867 efx_dword_t *inbuf, u64 handle,
3870 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3871 u32 flags = spec->flags;
3873 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
3875 /* Remove RSS flag if we don't have an RSS context. */
3876 if (flags & EFX_FILTER_FLAG_RX_RSS &&
3877 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
3878 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
3879 flags &= ~EFX_FILTER_FLAG_RX_RSS;
3882 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3883 MC_CMD_FILTER_OP_IN_OP_REPLACE);
3884 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
3886 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
3889 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
3890 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
3891 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3892 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
3893 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
3894 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
3895 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
3896 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
3897 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
3898 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3900 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
3901 (flags & EFX_FILTER_FLAG_RX_RSS) ?
3902 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
3903 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
3904 if (flags & EFX_FILTER_FLAG_RX_RSS)
3905 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
3906 spec->rss_context !=
3907 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
3908 spec->rss_context : nic_data->rx_rss_context);
3911 static int efx_ef10_filter_push(struct efx_nic *efx,
3912 const struct efx_filter_spec *spec,
3913 u64 *handle, bool replacing)
3915 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
3916 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
3919 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
3920 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3921 outbuf, sizeof(outbuf), NULL);
3923 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3925 rc = -EBUSY; /* to match efx_farch_filter_insert() */
3929 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
3931 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
3932 unsigned int match_flags = spec->match_flags;
3933 unsigned int uc_match, mc_match;
3936 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
3937 unsigned int old_match_flags = match_flags; \
3938 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
3939 if (match_flags != old_match_flags) \
3942 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
3943 mcdi_field ## _LBN : \
3944 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
3945 mcdi_field ## _LBN)); \
3947 /* inner or outer based on encap type */
3948 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
3949 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
3950 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
3951 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
3952 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
3953 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
3954 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
3955 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
3957 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
3958 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
3959 #undef MAP_FILTER_TO_MCDI_FLAG
3961 /* special handling for encap type, and mismatch */
3963 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
3965 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
3966 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
3968 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
3969 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
3971 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
3972 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
3975 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
3976 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
3978 is_multicast_ether_addr(spec->loc_mac) ?
3983 /* Did we map them all? */
3984 WARN_ON_ONCE(match_flags);
3989 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
3990 const struct efx_filter_spec *spec)
3992 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
3993 unsigned int match_pri;
3996 match_pri < table->rx_match_count;
3998 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
4001 return -EPROTONOSUPPORT;
4004 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4005 struct efx_filter_spec *spec,
4008 struct efx_ef10_filter_table *table = efx->filter_state;
4009 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4010 struct efx_filter_spec *saved_spec;
4011 unsigned int match_pri, hash;
4012 unsigned int priv_flags;
4013 bool replacing = false;
4019 /* For now, only support RX filters */
4020 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
4024 rc = efx_ef10_filter_pri(table, spec);
4029 hash = efx_ef10_filter_hash(spec);
4030 is_mc_recip = efx_filter_is_mc_recipient(spec);
4032 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4034 /* Find any existing filters with the same match tuple or
4035 * else a free slot to insert at. If any of them are busy,
4036 * we have to wait and retry.
4039 unsigned int depth = 1;
4042 spin_lock_bh(&efx->filter_lock);
4045 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4046 saved_spec = efx_ef10_filter_entry_spec(table, i);
4051 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
4052 if (table->entry[i].spec &
4053 EFX_EF10_FILTER_FLAG_BUSY)
4055 if (spec->priority < saved_spec->priority &&
4056 spec->priority != EFX_FILTER_PRI_AUTO) {
4061 /* This is the only one */
4062 if (spec->priority ==
4063 saved_spec->priority &&
4070 } else if (spec->priority >
4071 saved_spec->priority ||
4073 saved_spec->priority &&
4078 __set_bit(depth, mc_rem_map);
4082 /* Once we reach the maximum search depth, use
4083 * the first suitable slot or return -EBUSY if there was none. */
4086 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
4087 if (ins_index < 0) {
4097 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
4098 spin_unlock_bh(&efx->filter_lock);
4103 /* Create a software table entry if necessary, and mark it
4104 * busy. We might yet fail to insert, but any attempt to
4105 * insert a conflicting filter while we're waiting for the
4106 * firmware must find the busy entry.
4108 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4110 if (spec->priority == EFX_FILTER_PRI_AUTO &&
4111 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
4112 /* Just make sure it won't be removed */
4113 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
4114 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
4115 table->entry[ins_index].spec &=
4116 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4121 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
4123 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4128 *saved_spec = *spec;
4131 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
4132 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
4134 /* Mark lower-priority multicast recipients busy prior to removal */
4136 unsigned int depth, i;
4138 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4139 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4140 if (test_bit(depth, mc_rem_map))
4141 table->entry[i].spec |=
4142 EFX_EF10_FILTER_FLAG_BUSY;
4146 spin_unlock_bh(&efx->filter_lock);
4148 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
4151 /* Finalise the software table entry */
4152 spin_lock_bh(&efx->filter_lock);
4155 /* Update the fields that may differ */
4156 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
4157 saved_spec->flags |=
4158 EFX_FILTER_FLAG_RX_OVER_AUTO;
4159 saved_spec->priority = spec->priority;
4160 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
4161 saved_spec->flags |= spec->flags;
4162 saved_spec->rss_context = spec->rss_context;
4163 saved_spec->dmaq_id = spec->dmaq_id;
4165 } else if (!replacing) {
4169 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4171 /* Remove and finalise entries for lower-priority multicast recipients */
4175 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4176 unsigned int depth, i;
4178 memset(inbuf, 0, sizeof(inbuf));
4180 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4181 if (!test_bit(depth, mc_rem_map))
4184 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4185 saved_spec = efx_ef10_filter_entry_spec(table, i);
4186 priv_flags = efx_ef10_filter_entry_flags(table, i);
4189 spin_unlock_bh(&efx->filter_lock);
4190 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4191 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4192 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4193 table->entry[i].handle);
4194 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
4195 inbuf, sizeof(inbuf),
4197 spin_lock_bh(&efx->filter_lock);
4205 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
4207 efx_ef10_filter_set_entry(table, i, saved_spec,
4212 /* If successful, return the inserted filter ID */
4214 rc = efx_ef10_make_filter_id(match_pri, ins_index);
4216 wake_up_all(&table->waitq);
4218 spin_unlock_bh(&efx->filter_lock);
4219 finish_wait(&table->waitq, &wait);
4223 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4225 /* no need to do anything here on EF10 */
4228 /* Remove a filter.
4229 * If !by_index, remove by ID
4230 * If by_index, remove by index
4231 * Filter ID may come from userland and must be range-checked.
4232 */
4233 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
4234 unsigned int priority_mask,
4235 u32 filter_id, bool by_index)
4237 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4238 struct efx_ef10_filter_table *table = efx->filter_state;
4239 MCDI_DECLARE_BUF(inbuf,
4240 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4241 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4242 struct efx_filter_spec *spec;
4246 /* Find the software table entry and mark it busy. Don't
4247 * remove it yet; any attempt to update while we're waiting
4248 * for the firmware must find the busy entry.
4251 spin_lock_bh(&efx->filter_lock);
4252 if (!(table->entry[filter_idx].spec &
4253 EFX_EF10_FILTER_FLAG_BUSY))
4255 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
4256 spin_unlock_bh(&efx->filter_lock);
4260 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4263 efx_ef10_filter_pri(table, spec) !=
4264 efx_ef10_filter_get_unsafe_pri(filter_id))) {
4269 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
4270 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
4271 /* Just remove flags */
4272 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
4273 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
4278 if (!(priority_mask & (1U << spec->priority))) {
4283 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4284 spin_unlock_bh(&efx->filter_lock);
4286 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
4287 /* Reset to an automatic filter */
4289 struct efx_filter_spec new_spec = *spec;
4291 new_spec.priority = EFX_FILTER_PRI_AUTO;
4292 new_spec.flags = (EFX_FILTER_FLAG_RX |
4293 (efx_rss_enabled(efx) ?
4294 EFX_FILTER_FLAG_RX_RSS : 0));
4295 new_spec.dmaq_id = 0;
4296 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
4297 rc = efx_ef10_filter_push(efx, &new_spec,
4298 &table->entry[filter_idx].handle,
4301 spin_lock_bh(&efx->filter_lock);
4305 /* Really remove the filter */
4307 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4308 efx_ef10_filter_is_exclusive(spec) ?
4309 MC_CMD_FILTER_OP_IN_OP_REMOVE :
4310 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4311 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4312 table->entry[filter_idx].handle);
4313 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
4314 inbuf, sizeof(inbuf), NULL, 0, NULL);
4316 spin_lock_bh(&efx->filter_lock);
4317 if ((rc == 0) || (rc == -ENOENT)) {
4318 /* Filter removed OK or didn't actually exist */
4320 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4322 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
4323 MC_CMD_FILTER_OP_EXT_IN_LEN,
4328 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
4329 wake_up_all(&table->waitq);
4331 spin_unlock_bh(&efx->filter_lock);
4332 finish_wait(&table->waitq, &wait);
4336 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
4337 enum efx_filter_priority priority,
4340 return efx_ef10_filter_remove_internal(efx, 1U << priority,
4344 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
4345 enum efx_filter_priority priority,
4348 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
4350 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
4353 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
4354 enum efx_filter_priority priority,
4355 u32 filter_id, struct efx_filter_spec *spec)
4357 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
4358 struct efx_ef10_filter_table *table = efx->filter_state;
4359 const struct efx_filter_spec *saved_spec;
4362 spin_lock_bh(&efx->filter_lock);
4363 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
4364 if (saved_spec && saved_spec->priority == priority &&
4365 efx_ef10_filter_pri(table, saved_spec) ==
4366 efx_ef10_filter_get_unsafe_pri(filter_id)) {
4367 *spec = *saved_spec;
4372 spin_unlock_bh(&efx->filter_lock);
4376 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
4377 enum efx_filter_priority priority)
4379 unsigned int priority_mask;
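/* Build a mask of every priority at or below 'priority', always excluding
 * the automatic filters; e.g. priority 2 gives ((1 << 3) - 1) with the
 * EFX_FILTER_PRI_AUTO bit cleared.
 */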
4383 priority_mask = (((1U << (priority + 1)) - 1) &
4384 ~(1U << EFX_FILTER_PRI_AUTO));
4386 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4387 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
4389 if (rc && rc != -ENOENT)
4396 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
4397 enum efx_filter_priority priority)
4399 struct efx_ef10_filter_table *table = efx->filter_state;
4400 unsigned int filter_idx;
4403 spin_lock_bh(&efx->filter_lock);
4404 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4405 if (table->entry[filter_idx].spec &&
4406 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
4410 spin_unlock_bh(&efx->filter_lock);
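/* Filter IDs returned to the core pack (match priority, table row) into a
 * single u32; assuming the efx_ef10_make_filter_id() helper used below,
 * id = match_pri * HUNT_FILTER_TBL_ROWS * 2 + filter_idx, which is why the
 * ID limit reported here is rx_match_count * HUNT_FILTER_TBL_ROWS * 2.
 */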
4414 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
4416 struct efx_ef10_filter_table *table = efx->filter_state;
4418 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
4421 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4422 enum efx_filter_priority priority,
4425 struct efx_ef10_filter_table *table = efx->filter_state;
4426 struct efx_filter_spec *spec;
4427 unsigned int filter_idx;
4430 spin_lock_bh(&efx->filter_lock);
4431 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4432 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4433 if (spec && spec->priority == priority) {
4434 if (count == size) {
4439 efx_ef10_make_filter_id(
4440 efx_ef10_filter_pri(table, spec),
4444 spin_unlock_bh(&efx->filter_lock);
4448 #ifdef CONFIG_RFS_ACCEL
4450 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
4452 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
4453 struct efx_filter_spec *spec)
4455 struct efx_ef10_filter_table *table = efx->filter_state;
4456 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4457 struct efx_filter_spec *saved_spec;
4458 unsigned int hash, i, depth = 1;
4459 bool replacing = false;
4464 /* Must be an RX filter without RSS and not for a multicast
4465 * destination address (RFS only works for connected sockets).
4466 * These restrictions allow us to pass only a tiny amount of
4467 * data through to the completion function.
4469 EFX_WARN_ON_PARANOID(spec->flags !=
4470 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
4471 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
4472 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
4474 hash = efx_ef10_filter_hash(spec);
4476 spin_lock_bh(&efx->filter_lock);
4478 /* Find any existing filter with the same match tuple or else
4479 * a free slot to insert at. If an existing filter is busy,
4480 * we have to give up.
4483 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4484 saved_spec = efx_ef10_filter_entry_spec(table, i);
4489 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
4490 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
4494 if (spec->priority < saved_spec->priority) {
4502 /* Once we reach the maximum search depth, use the
4503 * first suitable slot or return -EBUSY if there was none. */
4506 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
4507 if (ins_index < 0) {
4517 /* Create a software table entry if necessary, and mark it
4518 * busy. We might yet fail to insert, but any attempt to
4519 * insert a conflicting filter while we're waiting for the
4520 * firmware must find the busy entry.
4522 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4526 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4531 *saved_spec = *spec;
4533 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
4534 EFX_EF10_FILTER_FLAG_BUSY);
4536 spin_unlock_bh(&efx->filter_lock);
4538 /* Pack up the variables needed on completion */
4539 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
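/* Cookie bit layout (matching the unpacking in the completion handler
 * below): bit 31 = replacing flag, bits 30:16 = software table index,
 * bits 15:0 = dmaq_id.
 */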
4541 efx_ef10_filter_push_prep(efx, spec, inbuf,
4542 table->entry[ins_index].handle, replacing);
4543 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4544 MC_CMD_FILTER_OP_OUT_LEN,
4545 efx_ef10_filter_rfs_insert_complete, cookie);
4550 spin_unlock_bh(&efx->filter_lock);
4555 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
4556 int rc, efx_dword_t *outbuf,
4557 size_t outlen_actual)
4559 struct efx_ef10_filter_table *table = efx->filter_state;
4560 unsigned int ins_index, dmaq_id;
4561 struct efx_filter_spec *spec;
4564 /* Unpack the cookie */
4565 replacing = cookie >> 31;
4566 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
4567 dmaq_id = cookie & 0xffff;
4569 spin_lock_bh(&efx->filter_lock);
4570 spec = efx_ef10_filter_entry_spec(table, ins_index);
4572 table->entry[ins_index].handle =
4573 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
4575 spec->dmaq_id = dmaq_id;
4576 } else if (!replacing) {
4580 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
4581 spin_unlock_bh(&efx->filter_lock);
4583 wake_up_all(&table->waitq);
4587 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4588 unsigned long filter_idx,
4589 int rc, efx_dword_t *outbuf,
4590 size_t outlen_actual);
4592 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4593 unsigned int filter_idx)
4595 struct efx_ef10_filter_table *table = efx->filter_state;
4596 struct efx_filter_spec *spec =
4597 efx_ef10_filter_entry_spec(table, filter_idx);
4598 MCDI_DECLARE_BUF(inbuf,
4599 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4600 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4603 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
4604 spec->priority != EFX_FILTER_PRI_HINT ||
4605 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
4606 flow_id, filter_idx))
4609 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4610 MC_CMD_FILTER_OP_IN_OP_REMOVE);
4611 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4612 table->entry[filter_idx].handle);
4613 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
4614 efx_ef10_filter_rfs_expire_complete, filter_idx))
4617 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4622 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
4623 unsigned long filter_idx,
4624 int rc, efx_dword_t *outbuf,
4625 size_t outlen_actual)
4627 struct efx_ef10_filter_table *table = efx->filter_state;
4628 struct efx_filter_spec *spec =
4629 efx_ef10_filter_entry_spec(table, filter_idx);
4631 spin_lock_bh(&efx->filter_lock);
4634 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4636 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
4637 wake_up_all(&table->waitq);
4638 spin_unlock_bh(&efx->filter_lock);
4641 #endif /* CONFIG_RFS_ACCEL */
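/* Inverse of efx_ef10_filter_mcdi_flags_from_spec(): convert an MCDI
 * match-flag mask reported by firmware back into EFX_FILTER_MATCH_* flags.
 * Any MCDI bits left over mean the driver cannot express that match
 * combination, and it is reported as unsupported.
 */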
4643 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
4645 int match_flags = 0;
4647 #define MAP_FLAG(gen_flag, mcdi_field) do { \
4648 u32 old_mcdi_flags = mcdi_flags; \
4649 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
4650 mcdi_field ## _LBN); \
4651 if (mcdi_flags != old_mcdi_flags) \
4652 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
4656 /* encap filters must specify encap type */
4657 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
4658 /* and imply ethertype and ip proto */
4660 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4662 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4663 /* VLAN tags refer to the outer packet */
4664 MAP_FLAG(INNER_VID, INNER_VLAN);
4665 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4666 /* everything else refers to the inner packet */
4667 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
4668 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
4669 MAP_FLAG(REM_HOST, IFRM_SRC_IP);
4670 MAP_FLAG(LOC_HOST, IFRM_DST_IP);
4671 MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
4672 MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
4673 MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
4674 MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
4675 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
4676 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
4678 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4679 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4680 MAP_FLAG(REM_HOST, SRC_IP);
4681 MAP_FLAG(LOC_HOST, DST_IP);
4682 MAP_FLAG(REM_MAC, SRC_MAC);
4683 MAP_FLAG(REM_PORT, SRC_PORT);
4684 MAP_FLAG(LOC_MAC, DST_MAC);
4685 MAP_FLAG(LOC_PORT, DST_PORT);
4686 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4687 MAP_FLAG(INNER_VID, INNER_VLAN);
4688 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4689 MAP_FLAG(IP_PROTO, IP_PROTO);
4693 /* Did we map them all? */
4700 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4702 struct efx_ef10_filter_table *table = efx->filter_state;
4703 struct efx_ef10_filter_vlan *vlan, *next_vlan;
4705 /* See comment in efx_ef10_filter_table_remove() */
4706 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4712 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4713 efx_ef10_filter_del_vlan_internal(efx, vlan);
4716 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4718 enum efx_filter_match_flags match_flags)
4720 unsigned int match_pri;
4724 match_pri < table->rx_match_count;
4726 mf = efx_ef10_filter_match_flags_from_mcdi(encap,
4727 table->rx_match_mcdi_flags[match_pri]);
4728 if (mf == match_flags)
4736 efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
4737 struct efx_ef10_filter_table *table,
4740 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4741 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4742 unsigned int pd_match_pri, pd_match_count;
4746 /* Find out which RX filter types are supported, and their priorities */
4747 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4749 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
4750 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4751 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4752 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4757 pd_match_count = MCDI_VAR_ARRAY_LEN(
4758 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
4760 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4764 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4766 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
4768 netif_dbg(efx, probe, efx->net_dev,
4769 "%s: fw flags %#x pri %u not supported in driver\n",
4770 __func__, mcdi_flags, pd_match_pri);
4772 netif_dbg(efx, probe, efx->net_dev,
4773 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4774 __func__, mcdi_flags, pd_match_pri,
4775 rc, table->rx_match_count);
4776 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4777 table->rx_match_count++;
4784 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4786 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4787 struct net_device *net_dev = efx->net_dev;
4788 struct efx_ef10_filter_table *table;
4789 struct efx_ef10_vlan *vlan;
4792 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4795 if (efx->filter_state) /* already probed */
4798 table = kzalloc(sizeof(*table), GFP_KERNEL);
4802 table->rx_match_count = 0;
4803 rc = efx_ef10_filter_table_probe_matches(efx, table, false);
4806 if (nic_data->datapath_caps &
4807 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
4808 rc = efx_ef10_filter_table_probe_matches(efx, table, true);
4811 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4812 !(efx_ef10_filter_match_supported(table, false,
4813 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4814 efx_ef10_filter_match_supported(table, false,
4815 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4816 netif_info(efx, probe, net_dev,
4817 "VLAN filters are not supported in this firmware variant\n");
4818 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4819 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4820 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4823 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
4824 if (!table->entry) {
4829 table->mc_promisc_last = false;
4830 table->vlan_filter =
4831 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4832 INIT_LIST_HEAD(&table->vlan_list);
4834 efx->filter_state = table;
4835 init_waitqueue_head(&table->waitq);
4837 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
4838 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
4846 efx_ef10_filter_cleanup_vlans(efx);
4847 efx->filter_state = NULL;
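/* After an MC reboot all hardware filter handles become invalid, so the
 * restore path re-pushes every entry of the software table to the firmware
 * and drops any entry whose match type the (possibly updated) firmware no
 * longer supports.
 */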
4853 /* Caller must hold efx->filter_sem for read if race against
4854 * efx_ef10_filter_table_remove() is possible
4856 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
4858 struct efx_ef10_filter_table *table = efx->filter_state;
4859 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4860 unsigned int invalid_filters = 0, failed = 0;
4861 struct efx_ef10_filter_vlan *vlan;
4862 struct efx_filter_spec *spec;
4863 unsigned int filter_idx;
4868 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4870 if (!nic_data->must_restore_filters)
4876 spin_lock_bh(&efx->filter_lock);
4878 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4879 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4883 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
4885 while (match_pri < table->rx_match_count &&
4886 table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
4888 if (match_pri >= table->rx_match_count) {
4892 if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT &&
4893 spec->rss_context != nic_data->rx_rss_context)
4894 netif_warn(efx, drv, efx->net_dev,
4895 "Warning: unable to restore a filter with specific RSS context.\n");
4897 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4898 spin_unlock_bh(&efx->filter_lock);
4900 rc = efx_ef10_filter_push(efx, spec,
4901 &table->entry[filter_idx].handle,
4905 spin_lock_bh(&efx->filter_lock);
4909 list_for_each_entry(vlan, &table->vlan_list, list)
4910 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
4911 if (vlan->default_filters[i] == filter_idx)
4912 vlan->default_filters[i] =
4913 EFX_EF10_FILTER_ID_INVALID;
4916 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4918 table->entry[filter_idx].spec &=
4919 ~EFX_EF10_FILTER_FLAG_BUSY;
4923 spin_unlock_bh(&efx->filter_lock);
4925 /* This can happen validly if the MC's capabilities have changed, so it is not an error. */
4928 if (invalid_filters)
4929 netif_dbg(efx, drv, efx->net_dev,
4930 "Did not restore %u filters that are now unsupported.\n",
4934 netif_err(efx, hw, efx->net_dev,
4935 "unable to restore %u filters\n", failed);
4937 nic_data->must_restore_filters = false;
4940 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
4942 struct efx_ef10_filter_table *table = efx->filter_state;
4943 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4944 struct efx_filter_spec *spec;
4945 unsigned int filter_idx;
4948 efx_ef10_filter_cleanup_vlans(efx);
4949 efx->filter_state = NULL;
4950 /* If we were called without locking, then it's not safe to free
4951 * the table as others might be using it. So we just WARN, leak
4952 * the memory, and potentially get an inconsistent filter table state.
4954 * This should never actually happen.
4956 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4962 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4963 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4967 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4968 efx_ef10_filter_is_exclusive(spec) ?
4969 MC_CMD_FILTER_OP_IN_OP_REMOVE :
4970 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4971 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4972 table->entry[filter_idx].handle);
4973 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
4974 sizeof(inbuf), NULL, 0, NULL);
4976 netif_info(efx, drv, efx->net_dev,
4977 "%s: filter %04x remove failed\n",
4978 __func__, filter_idx);
4982 vfree(table->entry);
4986 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
4988 struct efx_ef10_filter_table *table = efx->filter_state;
4989 unsigned int filter_idx;
4991 if (*id != EFX_EF10_FILTER_ID_INVALID) {
4992 filter_idx = efx_ef10_filter_get_unsafe_id(*id);
4993 if (!table->entry[filter_idx].spec)
4994 netif_dbg(efx, drv, efx->net_dev,
4995 "marked null spec old %04x:%04x\n", *id,
4996 filter_idx);
4997 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
4998 *id = EFX_EF10_FILTER_ID_INVALID;
5002 /* Mark old per-VLAN filters that may need to be removed */
5003 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
5004 struct efx_ef10_filter_vlan *vlan)
5006 struct efx_ef10_filter_table *table = efx->filter_state;
5009 for (i = 0; i < table->dev_uc_count; i++)
5010 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
5011 for (i = 0; i < table->dev_mc_count; i++)
5012 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
5013 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5014 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
5017 /* Mark old filters that may need to be removed.
5018 * Caller must hold efx->filter_sem for read if race against
5019 * efx_ef10_filter_table_remove() is possible
5021 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
5023 struct efx_ef10_filter_table *table = efx->filter_state;
5024 struct efx_ef10_filter_vlan *vlan;
5026 spin_lock_bh(&efx->filter_lock);
5027 list_for_each_entry(vlan, &table->vlan_list, list)
5028 _efx_ef10_filter_vlan_mark_old(efx, vlan);
5029 spin_unlock_bh(&efx->filter_lock);
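/* Address list maintenance is mark-and-sweep: sync_rx_mode first marks all
 * previously installed auto filters AUTO_OLD (above); re-inserting a filter
 * for a still-wanted address clears the flag again, and finally
 * efx_ef10_filter_remove_old() sweeps whatever is still marked.
 */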
5032 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
5034 struct efx_ef10_filter_table *table = efx->filter_state;
5035 struct net_device *net_dev = efx->net_dev;
5036 struct netdev_hw_addr *uc;
5039 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
5040 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
5042 netdev_for_each_uc_addr(uc, net_dev) {
5043 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
5044 table->uc_promisc = true;
5047 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5051 table->dev_uc_count = i;
5054 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
5056 struct efx_ef10_filter_table *table = efx->filter_state;
5057 struct net_device *net_dev = efx->net_dev;
5058 struct netdev_hw_addr *mc;
5061 table->mc_overflow = false;
5062 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
5065 netdev_for_each_mc_addr(mc, net_dev) {
5066 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
5067 table->mc_promisc = true;
5068 table->mc_overflow = true;
5071 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
5075 table->dev_mc_count = i;
5078 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5079 struct efx_ef10_filter_vlan *vlan,
5080 bool multicast, bool rollback)
5082 struct efx_ef10_filter_table *table = efx->filter_state;
5083 struct efx_ef10_dev_addr *addr_list;
5084 enum efx_filter_flags filter_flags;
5085 struct efx_filter_spec spec;
5093 addr_list = table->dev_mc_list;
5094 addr_count = table->dev_mc_count;
5097 addr_list = table->dev_uc_list;
5098 addr_count = table->dev_uc_count;
5102 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
5104 /* Insert/renew filters */
5105 for (i = 0; i < addr_count; i++) {
5106 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5107 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5108 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5109 rc = efx_ef10_filter_insert(efx, &spec, true);
5112 netif_info(efx, drv, efx->net_dev,
5113 "efx_ef10_filter_insert failed rc=%d\n",
5115 /* Fall back to promiscuous */
5116 for (j = 0; j < i; j++) {
5117 efx_ef10_filter_remove_unsafe(
5118 efx, EFX_FILTER_PRI_AUTO,
5120 ids[j] = EFX_EF10_FILTER_ID_INVALID;
5124 /* keep invalid ID, and carry on */
5127 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5131 if (multicast && rollback) {
5132 /* Also need an Ethernet broadcast filter */
5133 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5134 EFX_EF10_FILTER_ID_INVALID);
5135 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5136 eth_broadcast_addr(baddr);
5137 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5138 rc = efx_ef10_filter_insert(efx, &spec, true);
5140 netif_warn(efx, drv, efx->net_dev,
5141 "Broadcast filter insert failed rc=%d\n", rc);
5142 /* Fall back to promiscuous */
5143 for (j = 0; j < i; j++) {
5144 efx_ef10_filter_remove_unsafe(
5145 efx, EFX_FILTER_PRI_AUTO,
5147 ids[j] = EFX_EF10_FILTER_ID_INVALID;
5151 vlan->default_filters[EFX_EF10_BCAST] =
5152 efx_ef10_filter_get_unsafe_id(rc);
5159 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5160 struct efx_ef10_filter_vlan *vlan,
5161 enum efx_encap_type encap_type,
5162 bool multicast, bool rollback)
5164 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5165 enum efx_filter_flags filter_flags;
5166 struct efx_filter_spec spec;
5171 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
5173 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5176 efx_filter_set_mc_def(&spec);
5178 efx_filter_set_uc_def(&spec);
5181 if (nic_data->datapath_caps &
5182 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5183 efx_filter_set_encap_type(&spec, encap_type);
5185 /* don't insert encap filters on non-supporting
5186 * platforms. ID will be left as INVALID.
5191 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5192 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5194 rc = efx_ef10_filter_insert(efx, &spec, true);
5196 const char *um = multicast ? "Multicast" : "Unicast";
5197 const char *encap_name = "";
5198 const char *encap_ipv = "";
5200 if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5201 EFX_ENCAP_TYPE_VXLAN)
5202 encap_name = "VXLAN ";
5203 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5204 EFX_ENCAP_TYPE_NVGRE)
5205 encap_name = "NVGRE ";
5206 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5207 EFX_ENCAP_TYPE_GENEVE)
5208 encap_name = "GENEVE ";
5209 if (encap_type & EFX_ENCAP_FLAG_IPV6)
5210 encap_ipv = "IPv6 ";
5211 else if (encap_type)
5212 encap_ipv = "IPv4 ";
5214 /* unprivileged functions can't insert mismatch filters
5215 * for encapsulated or unicast traffic, so downgrade
5216 * those warnings to debug.
5218 netif_cond_dbg(efx, drv, efx->net_dev,
5219 rc == -EPERM && (encap_type || !multicast), warn,
5220 "%s%s%s mismatch filter insert failed rc=%d\n",
5221 encap_name, encap_ipv, um, rc);
5222 } else if (multicast) {
5223 /* mapping from encap types to default filter IDs (multicast) */
5224 static enum efx_ef10_default_filters map[] = {
5225 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
5226 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
5227 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
5228 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
5229 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5230 EFX_EF10_VXLAN6_MCDEF,
5231 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5232 EFX_EF10_NVGRE6_MCDEF,
5233 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5234 EFX_EF10_GENEVE6_MCDEF,
5237 /* quick bounds check (BCAST result impossible) */
5238 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5239 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5243 /* then follow map */
5244 id = &vlan->default_filters[map[encap_type]];
5246 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5247 *id = efx_ef10_filter_get_unsafe_id(rc);
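/* With firmware multicast chaining (workaround_26807) the mc_def
 * mismatch filter is assumed to cover broadcast traffic too; without
 * it, an explicit Ethernet broadcast filter is inserted alongside.
 */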
5248 if (!nic_data->workaround_26807 && !encap_type) {
5249 /* Also need an Ethernet broadcast filter */
5250 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
5252 eth_broadcast_addr(baddr);
5253 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5254 rc = efx_ef10_filter_insert(efx, &spec, true);
5256 netif_warn(efx, drv, efx->net_dev,
5257 "Broadcast filter insert failed rc=%d\n",
5260 /* Roll back the mc_def filter */
5261 efx_ef10_filter_remove_unsafe(
5262 efx, EFX_FILTER_PRI_AUTO,
5264 *id = EFX_EF10_FILTER_ID_INVALID;
5268 EFX_WARN_ON_PARANOID(
5269 vlan->default_filters[EFX_EF10_BCAST] !=
5270 EFX_EF10_FILTER_ID_INVALID);
5271 vlan->default_filters[EFX_EF10_BCAST] =
5272 efx_ef10_filter_get_unsafe_id(rc);
5277 /* mapping from encap types to default filter IDs (unicast) */
5278 static enum efx_ef10_default_filters map[] = {
5279 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
5280 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
5281 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
5282 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
5283 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5284 EFX_EF10_VXLAN6_UCDEF,
5285 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5286 EFX_EF10_NVGRE6_UCDEF,
5287 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5288 EFX_EF10_GENEVE6_UCDEF,
5291 /* quick bounds check (BCAST result impossible) */
5292 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
5293 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
5297 /* then follow map */
5298 id = &vlan->default_filters[map[encap_type]];
5299 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5306 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
5307 * flag or removes these filters, we don't need to hold the filter_lock while
5308 * scanning for these filters.
5310 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5312 struct efx_ef10_filter_table *table = efx->filter_state;
5313 int remove_failed = 0;
5314 int remove_noent = 0;
5318 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
5319 if (ACCESS_ONCE(table->entry[i].spec) &
5320 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
5321 rc = efx_ef10_filter_remove_internal(efx,
5322 1U << EFX_FILTER_PRI_AUTO, i, true);
5331 netif_info(efx, drv, efx->net_dev,
5332 "%s: failed to remove %d filters\n",
5333 __func__, remove_failed);
5335 netif_info(efx, drv, efx->net_dev,
5336 "%s: failed to remove %d non-existent filters\n",
5337 __func__, remove_noent);
5340 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5342 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5343 u8 mac_old[ETH_ALEN];
5346 /* Only reconfigure a PF-created vport */
5347 if (is_zero_ether_addr(nic_data->vport_mac))
5350 efx_device_detach_sync(efx);
5351 efx_net_stop(efx->net_dev);
5352 down_write(&efx->filter_sem);
5353 efx_ef10_filter_table_remove(efx);
5354 up_write(&efx->filter_sem);
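/* The vadaptor must be freed before the vport's MAC address list can be
 * changed, hence the ordering below; the restore_* error paths walk back
 * up this same sequence if any step fails.
 */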
5356 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5358 goto restore_filters;
5360 ether_addr_copy(mac_old, nic_data->vport_mac);
5361 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5362 nic_data->vport_mac);
5364 goto restore_vadaptor;
5366 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5367 efx->net_dev->dev_addr);
5369 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5371 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5373 /* Failed to add original MAC, so clear vport_mac */
5374 eth_zero_addr(nic_data->vport_mac);
5380 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5384 down_write(&efx->filter_sem);
5385 rc2 = efx_ef10_filter_table_probe(efx);
5386 up_write(&efx->filter_sem);
5390 rc2 = efx_net_open(efx->net_dev);
5394 efx_device_attach_if_not_resetting(efx);
5399 netif_err(efx, drv, efx->net_dev,
5400 "Failed to restore when changing MAC address - scheduling reset\n");
5401 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5403 return rc ? rc : rc2;
5406 /* Caller must hold efx->filter_sem for read if race against
5407 * efx_ef10_filter_table_remove() is possible
5409 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5410 struct efx_ef10_filter_vlan *vlan)
5412 struct efx_ef10_filter_table *table = efx->filter_state;
5413 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5415 /* Do not install unspecified VID if VLAN filtering is enabled.
5416 * Do not install all specified VIDs if VLAN filtering is disabled.
5418 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5421 /* Insert/renew unicast filters */
5422 if (table->uc_promisc) {
5423 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5425 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
5427 /* If any of the filters failed to insert, fall back to
5428 * promiscuous mode - add in the uc_def filter. But keep
5429 * our individual unicast filters.
5431 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
5432 efx_ef10_filter_insert_def(efx, vlan,
5433 EFX_ENCAP_TYPE_NONE,
5436 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5438 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5439 EFX_ENCAP_FLAG_IPV6,
5441 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5443 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5444 EFX_ENCAP_FLAG_IPV6,
5446 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5448 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5449 EFX_ENCAP_FLAG_IPV6,
5452 /* Insert/renew multicast filters */
5453 /* If changing promiscuous state with cascaded multicast filters, remove
5454 * old filters first, so that packets are dropped rather than duplicated
5456 if (nic_data->workaround_26807 &&
5457 table->mc_promisc_last != table->mc_promisc)
5458 efx_ef10_filter_remove_old(efx);
5459 if (table->mc_promisc) {
5460 if (nic_data->workaround_26807) {
5461 /* If we failed to insert promiscuous filters, rollback
5462 * and fall back to individual multicast filters
5464 if (efx_ef10_filter_insert_def(efx, vlan,
5465 EFX_ENCAP_TYPE_NONE,
5467 /* Changing promisc state, so remove old filters */
5468 efx_ef10_filter_remove_old(efx);
5469 efx_ef10_filter_insert_addr_list(efx, vlan,
5473 /* If we failed to insert promiscuous filters, don't
5474 * rollback. Regardless, also insert the mc_list,
5475 * unless it's incomplete due to overflow
5477 efx_ef10_filter_insert_def(efx, vlan,
5478 EFX_ENCAP_TYPE_NONE,
5480 if (!table->mc_overflow)
5481 efx_ef10_filter_insert_addr_list(efx, vlan,
5485 /* If any filters failed to insert, rollback and fall back to
5486 * promiscuous mode - mc_def filter and maybe broadcast. If
5487 * that fails, roll back again and insert as many of our
5488 * individual multicast filters as we can.
5490 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
5491 /* Changing promisc state, so remove old filters */
5492 if (nic_data->workaround_26807)
5493 efx_ef10_filter_remove_old(efx);
5494 if (efx_ef10_filter_insert_def(efx, vlan,
5495 EFX_ENCAP_TYPE_NONE,
5497 efx_ef10_filter_insert_addr_list(efx, vlan,
5501 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5503 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5504 EFX_ENCAP_FLAG_IPV6,
5506 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5508 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5509 EFX_ENCAP_FLAG_IPV6,
5511 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5513 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5514 EFX_ENCAP_FLAG_IPV6,
5518 /* Caller must hold efx->filter_sem for read if race against
5519 * efx_ef10_filter_table_remove() is possible
5521 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5523 struct efx_ef10_filter_table *table = efx->filter_state;
5524 struct net_device *net_dev = efx->net_dev;
5525 struct efx_ef10_filter_vlan *vlan;
5528 if (!efx_dev_registered(efx))
5534 efx_ef10_filter_mark_old(efx);
5536 /* Copy/convert the address lists; add the primary station
5537 * address and broadcast address
5539 netif_addr_lock_bh(net_dev);
5540 efx_ef10_filter_uc_addr_list(efx);
5541 efx_ef10_filter_mc_addr_list(efx);
5542 netif_addr_unlock_bh(net_dev);
5544 /* If VLAN filtering changes, all old filters are finally removed.
5545 * Do it in advance to avoid conflicts for unicast untagged and
5546 * VLAN 0 tagged filters.
5548 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5549 if (table->vlan_filter != vlan_filter) {
5550 table->vlan_filter = vlan_filter;
5551 efx_ef10_filter_remove_old(efx);
5554 list_for_each_entry(vlan, &table->vlan_list, list)
5555 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5557 efx_ef10_filter_remove_old(efx);
5558 table->mc_promisc_last = table->mc_promisc;
5561 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5563 struct efx_ef10_filter_table *table = efx->filter_state;
5564 struct efx_ef10_filter_vlan *vlan;
5566 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5568 list_for_each_entry(vlan, &table->vlan_list, list) {
5569 if (vlan->vid == vid)
5576 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5578 struct efx_ef10_filter_table *table = efx->filter_state;
5579 struct efx_ef10_filter_vlan *vlan;
5582 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5585 vlan = efx_ef10_filter_find_vlan(efx, vid);
5586 if (WARN_ON(vlan)) {
5587 netif_err(efx, drv, efx->net_dev,
5588 "VLAN %u already added\n", vid);
5592 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5598 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5599 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5600 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5601 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
5602 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5603 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
5605 list_add_tail(&vlan->list, &table->vlan_list);
5607 if (efx_dev_registered(efx))
5608 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5613 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5614 struct efx_ef10_filter_vlan *vlan)
5618 /* See comment in efx_ef10_filter_table_remove() */
5619 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5622 list_del(&vlan->list);
5624 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5625 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5627 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5628 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5630 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5631 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5632 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5633 vlan->default_filters[i]);
5638 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5640 struct efx_ef10_filter_vlan *vlan;
5642 /* See comment in efx_ef10_filter_table_remove() */
5643 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5646 vlan = efx_ef10_filter_find_vlan(efx, vid);
5648 netif_err(efx, drv, efx->net_dev,
5649 "VLAN %u not found in filter state\n", vid);
5653 efx_ef10_filter_del_vlan_internal(efx, vlan);
5656 static int efx_ef10_set_mac_address(struct efx_nic *efx)
5658 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5659 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5660 bool was_enabled = efx->port_enabled;
5663 efx_device_detach_sync(efx);
5664 efx_net_stop(efx->net_dev);
5666 mutex_lock(&efx->mac_lock);
5667 down_write(&efx->filter_sem);
5668 efx_ef10_filter_table_remove(efx);
5670 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5671 efx->net_dev->dev_addr);
5672 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5673 nic_data->vport_id);
5674 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5675 sizeof(inbuf), NULL, 0, NULL);
5677 efx_ef10_filter_table_probe(efx);
5678 up_write(&efx->filter_sem);
5679 mutex_unlock(&efx->mac_lock);
5682 efx_net_open(efx->net_dev);
5683 efx_device_attach_if_not_resetting(efx);
5685 #ifdef CONFIG_SFC_SRIOV
5686 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
5687 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5690 struct efx_nic *efx_pf;
5692 /* Switch to PF and change MAC address on vport */
5693 efx_pf = pci_get_drvdata(pci_dev_pf);
5695 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
5697 efx->net_dev->dev_addr);
5699 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5700 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5703 /* MAC address successfully changed by VF (with MAC
5704 * spoofing) so update the parent PF if possible.
5706 for (i = 0; i < efx_pf->vf_count; ++i) {
5707 struct ef10_vf *vf = nic_data->vf + i;
5709 if (vf->efx == efx) {
5710 ether_addr_copy(vf->mac,
5711 efx->net_dev->dev_addr);
5719 netif_err(efx, drv, efx->net_dev,
5720 "Cannot change MAC address; use sfboot to enable"
5721 " mac-spoofing on this interface\n");
5722 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5723 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
5724 * fall back to the method of changing the MAC address on the
5725 * vport. This only applies to PFs because such versions of
5726 * MCFW do not support VFs.
5728 rc = efx_ef10_vport_set_mac_address(efx);
5730 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5731 sizeof(inbuf), NULL, 0, rc);
5737 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5739 efx_ef10_filter_sync_rx_mode(efx);
5741 return efx_mcdi_set_mac(efx);
5744 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5746 efx_ef10_filter_sync_rx_mode(efx);
5751 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5753 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5755 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
5756 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
5760 /* MC BISTs follow a different poll mechanism from PHY BISTs.
5761 * The BIST is done in the poll handler on the MC, and the MCDI command
5762 * will block until the BIST is done.
5764 static int efx_ef10_poll_bist(struct efx_nic *efx)
5767 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5771 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5772 outbuf, sizeof(outbuf), &outlen);
5776 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5779 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5781 case MC_CMD_POLL_BIST_PASSED:
5782 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5784 case MC_CMD_POLL_BIST_TIMEOUT:
5785 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5787 case MC_CMD_POLL_BIST_FAILED:
5788 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5791 netif_err(efx, hw, efx->net_dev,
5792 "BIST returned unknown result %u\n", result);
5797 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5801 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5803 rc = efx_ef10_start_bist(efx, bist_type);
5807 return efx_ef10_poll_bist(efx);
5811 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
5815 efx_reset_down(efx, RESET_TYPE_WORLD);
5817 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
5818 NULL, 0, NULL, 0, NULL);
5822 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
5823 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
5825 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
5830 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
5831 return rc ? rc : rc2;
5834 #ifdef CONFIG_SFC_MTD
5836 struct efx_ef10_nvram_type_info {
5837 u16 type, type_mask;
5842 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
5843 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
5844 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
5845 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
5846 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
5847 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
5848 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
5849 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
5850 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
5851 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
5852 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
5853 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
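/* A partition matches an entry when (type & ~type_mask) == type, so the
 * 0xff mask on NVRAM_PARTITION_TYPE_PHY_MIN lets that single entry cover
 * the whole range of per-PHY firmware partitions.
 */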
5856 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
5857 struct efx_mcdi_mtd_partition *part,
5860 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
5861 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
5862 const struct efx_ef10_nvram_type_info *info;
5863 size_t size, erase_size, outlen;
5867 for (info = efx_ef10_nvram_types; ; info++) {
5868 if (info ==
5869 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
5870 return -ENODEV;
5871 if ((type & ~info->type_mask) == info->type)
5874 if (info->port != efx_port_num(efx))
5877 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
5881 return -ENODEV; /* hide it */
5883 part->nvram_type = type;
5885 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
5886 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
5887 outbuf, sizeof(outbuf), &outlen);
5890 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
5892 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
5893 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
5894 part->fw_subtype = MCDI_DWORD(outbuf,
5895 NVRAM_METADATA_OUT_SUBTYPE);
5897 part->common.dev_type_name = "EF10 NVRAM manager";
5898 part->common.type_name = info->name;
5900 part->common.mtd.type = MTD_NORFLASH;
5901 part->common.mtd.flags = MTD_CAP_NORFLASH;
5902 part->common.mtd.size = size;
5903 part->common.mtd.erasesize = erase_size;
5908 static int efx_ef10_mtd_probe(struct efx_nic *efx)
5910 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
5911 struct efx_mcdi_mtd_partition *parts;
5912 size_t outlen, n_parts_total, i, n_parts;
5918 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
5919 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
5920 outbuf, sizeof(outbuf), &outlen);
5923 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
5926 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
5927 if (n_parts_total >
5928 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
5929 return -EIO;
5931 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
5936 for (i = 0; i < n_parts_total; i++) {
5937 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
5939 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
5942 else if (rc != -ENODEV)
5946 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
5953 #endif /* CONFIG_SFC_MTD */
5955 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
5957 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
5960 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
5963 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
5966 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
5969 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
5970 channel->sync_events_state == SYNC_EVENTS_VALID ||
5971 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
5973 channel->sync_events_state = SYNC_EVENTS_REQUESTED;
5975 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
5976 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
5977 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
5980 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
5981 inbuf, sizeof(inbuf), NULL, 0, NULL);
5984 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
5985 SYNC_EVENTS_DISABLED;
5990 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
5993 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
5996 if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
5997 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
5999 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6000 channel->sync_events_state = SYNC_EVENTS_DISABLED;
6003 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6004 SYNC_EVENTS_DISABLED;
6006 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6007 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6008 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6009 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6010 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6013 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6014 inbuf, sizeof(inbuf), NULL, 0, NULL);
6019 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6022 int (*set)(struct efx_channel *channel, bool temp);
6023 struct efx_channel *channel;
6026 efx_ef10_rx_enable_timestamping :
6027 efx_ef10_rx_disable_timestamping;
6029 efx_for_each_channel(channel, efx) {
6030 int rc = set(channel, temp);
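/* If enabling fails on any channel, unwind by disabling on all channels
 * rather than leaving a partial subscription behind.
 */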
6031 if (en && rc != 0) {
6032 efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6040 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6041 struct hwtstamp_config *init)
6046 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6047 struct hwtstamp_config *init)
6051 switch (init->rx_filter) {
6052 case HWTSTAMP_FILTER_NONE:
6053 efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6054 /* if TX timestamping is still requested then leave PTP on */
6055 return efx_ptp_change_mode(efx,
6056 init->tx_type != HWTSTAMP_TX_OFF, 0);
6057 case HWTSTAMP_FILTER_ALL:
6058 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6059 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6060 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6061 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6062 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6063 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6064 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6065 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6066 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6067 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6068 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6069 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6070 case HWTSTAMP_FILTER_NTP_ALL:
6071 init->rx_filter = HWTSTAMP_FILTER_ALL;
6072 rc = efx_ptp_change_mode(efx, true, 0);
6074 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6076 efx_ptp_change_mode(efx, false, 0);
6083 static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6084 struct netdev_phys_item_id *ppid)
6086 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6088 if (!is_valid_ether_addr(nic_data->port_id))
6091 ppid->id_len = ETH_ALEN;
6092 memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6097 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6099 if (proto != htons(ETH_P_8021Q))
6102 return efx_ef10_add_vlan(efx, vid);
6105 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6107 if (proto != htons(ETH_P_8021Q))
6110 return efx_ef10_del_vlan(efx, vid);
6113 /* We rely on the MCDI wiping out our TX rings if it made any changes to the
6114 * ports table, ensuring that any TSO descriptors that were made on a now-
6115 * removed tunnel port will be blown away and won't break things when we try
6116 * to transmit them using the new ports table.
6118 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6120 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6121 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6122 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6123 bool will_reset = false;
6124 size_t num_entries = 0;
6125 size_t inlen, outlen;
6128 efx_dword_t flags_and_num_entries;
6130 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6132 nic_data->udp_tunnels_dirty = false;
6134 if (!(nic_data->datapath_caps &
6135 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
6136 efx_device_attach_if_not_resetting(efx);
6140 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6141 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6143 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6144 if (nic_data->udp_tunnels[i].count &&
6145 nic_data->udp_tunnels[i].port) {
6148 EFX_POPULATE_DWORD_2(entry,
6149 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6150 ntohs(nic_data->udp_tunnels[i].port),
6151 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6152 nic_data->udp_tunnels[i].type);
6153 *_MCDI_ARRAY_DWORD(inbuf,
6154 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6155 num_entries++) = entry;
6159 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6160 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6161 EFX_WORD_1_LBN);
6162 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6163 EFX_WORD_1_WIDTH);
6164 EFX_POPULATE_DWORD_2(flags_and_num_entries,
6165 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6166 !!unloading,
6167 EFX_WORD_1, num_entries);
6168 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6169 flags_and_num_entries;
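/* FLAGS and NUM_ENTRIES are adjacent 16-bit fields of the same 32-bit word
 * in the MCDI request, so both are written with one EFX_POPULATE_DWORD_2()
 * into the FLAGS dword; the BUILD_BUG_ON()s above check that NUM_ENTRIES
 * really occupies EFX_WORD_1 (the upper 16 bits) of that dword.
 */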
6171 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6173 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6174 inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6176 /* Most likely the MC rebooted due to another function also
6177 * setting its tunnel port list. Mark the tunnel port list as
6178 * dirty, so it will be pushed upon coming up from the reboot.
6180 nic_data->udp_tunnels_dirty = true;
6185 /* the command is expected to be unavailable on unprivileged functions */
6187 netif_warn(efx, drv, efx->net_dev,
6188 "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6189 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6190 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6191 netif_info(efx, drv, efx->net_dev,
6192 "Rebooting MC due to UDP tunnel port list change\n");
6195 /* Delay for the MC reset to complete. This will make
6196 * unloading other functions a bit smoother. This is a
6197 * race, but the other unload will work whichever way
6198 * it goes; this just avoids an unnecessary error message. */
6203 if (!will_reset && !unloading) {
6204 /* The caller will have detached, relying on the MC reset to
6205 * trigger a re-attach. Since there won't be an MC reset, we
6206 * have to do the attach ourselves.
6208 efx_device_attach_if_not_resetting(efx);

static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc = 0;

	mutex_lock(&nic_data->udp_tunnels_lock);
	if (nic_data->udp_tunnels_dirty) {
		/* Make sure all TX are stopped while we modify the table, else
		 * we might race against an efx_features_check().
		 */
		efx_device_detach_sync(efx);
		rc = efx_ef10_set_udp_tnl_ports(efx, false);
	}
	mutex_unlock(&nic_data->udp_tunnels_lock);
	return rc;
}
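
/* The dirty flag set on -EIO in efx_ef10_set_udp_tnl_ports() is what
 * makes this re-push useful: after an MC reboot the core driver can
 * invoke the udp_tnl_push_ports method to bring the hardware table
 * back in sync with the software one.  A plausible call site
 * (illustrative only, not a quote of the core driver) in the
 * post-reset path:
 *
 *	if (efx->type->udp_tnl_push_ports)
 *		(void)efx->type->udp_tnl_push_ports(efx);
 */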

static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
							     __be16 port)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
		if (!nic_data->udp_tunnels[i].count)
			continue;
		if (nic_data->udp_tunnels[i].port == port)
			return &nic_data->udp_tunnels[i];
	}
	return NULL;
}

static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
				     struct efx_udp_tunnel tnl)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_udp_tunnel *match;
	char typebuf[8];
	size_t i;
	int rc;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
		return 0;

	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
	netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
		  typebuf, ntohs(tnl.port));

	mutex_lock(&nic_data->udp_tunnels_lock);
	/* Make sure all TX are stopped while we add to the table, else we
	 * might race against an efx_features_check().
	 */
	efx_device_detach_sync(efx);

	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
	if (match != NULL) {
		if (match->type == tnl.type) {
			netif_dbg(efx, drv, efx->net_dev,
				  "Referencing existing tunnel entry\n");
			match->count++;
			/* No need to cause an MCDI update */
			rc = 0;
			goto unlock_out;
		}
		efx_get_udp_tunnel_type_name(match->type,
					     typebuf, sizeof(typebuf));
		netif_dbg(efx, drv, efx->net_dev,
			  "UDP port %d is already in use by %s\n",
			  ntohs(tnl.port), typebuf);
		rc = -EEXIST;
		goto unlock_out;
	}

	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
		if (!nic_data->udp_tunnels[i].count) {
			nic_data->udp_tunnels[i] = tnl;
			nic_data->udp_tunnels[i].count = 1;
			rc = efx_ef10_set_udp_tnl_ports(efx, false);
			goto unlock_out;
		}

	netif_dbg(efx, drv, efx->net_dev,
		  "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
		  typebuf, ntohs(tnl.port));
	rc = -EPERM;

unlock_out:
	mutex_unlock(&nic_data->udp_tunnels_lock);
	return rc;
}
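
/* Reference-counting walk-through for the table managed above
 * (illustrative only).  Adding the same port/type pair twice keeps a
 * single entry and causes a single MCDI update; removal only touches
 * the hardware once the count drops to zero.
 *
 *	struct efx_udp_tunnel tnl = {
 *		.type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
 *		.port = htons(4789),
 *	};
 *
 *	efx_ef10_udp_tnl_add_port(efx, tnl);	// count = 1, MCDI update
 *	efx_ef10_udp_tnl_add_port(efx, tnl);	// count = 2, no MCDI
 *	efx_ef10_udp_tnl_del_port(efx, tnl);	// count = 1, entry stays
 */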

/* Called under the TX lock with the TX queue running, hence no-one can be
 * in the middle of updating the UDP tunnels table.  However, they could
 * have tried and failed the MCDI, in which case they'll have set the dirty
 * flag before dropping their locks.
 */
static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
		return false;

	if (nic_data->udp_tunnels_dirty)
		/* SW table may not match HW state, so just assume we can't
		 * use any UDP tunnel offloads.
		 */
		return false;

	return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
}
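
/* This is the query that lets the TX path decide whether to trust the
 * tunnel offloads for a given packet.  Illustrative features_check-style
 * caller; efx_tnl_dst_port() is a hypothetical helper standing in for
 * extraction of the outer UDP destination port:
 *
 *	if (skb->encapsulation &&
 *	    !efx->type->udp_tnl_has_port(efx, efx_tnl_dst_port(skb)))
 *		features &= ~(NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK);
 */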

static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
				     struct efx_udp_tunnel tnl)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_udp_tunnel *match;
	char typebuf[8];
	int rc;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
		return 0;

	efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
	netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
		  typebuf, ntohs(tnl.port));

	mutex_lock(&nic_data->udp_tunnels_lock);
	/* Make sure all TX are stopped while we remove from the table, else we
	 * might race against an efx_features_check().
	 */
	efx_device_detach_sync(efx);

	match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
	if (match != NULL) {
		if (match->type == tnl.type) {
			if (--match->count) {
				/* Port is still in use, so nothing to do */
				netif_dbg(efx, drv, efx->net_dev,
					  "UDP tunnel port %d remains active\n",
					  ntohs(tnl.port));
				rc = 0;
				goto out_unlock;
			}
			rc = efx_ef10_set_udp_tnl_ports(efx, false);
			goto out_unlock;
		}
		efx_get_udp_tunnel_type_name(match->type,
					     typebuf, sizeof(typebuf));
		netif_warn(efx, drv, efx->net_dev,
			   "UDP port %d is actually in use by %s, not removing\n",
			   ntohs(tnl.port), typebuf);
	}
	rc = -ENOENT;

out_unlock:
	mutex_unlock(&nic_data->udp_tunnels_lock);
	return rc;
}

#define EF10_OFFLOAD_FEATURES		\
	(NETIF_F_IP_CSUM |		\
	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
	 NETIF_F_IPV6_CSUM |		\
	 NETIF_F_RXHASH |		\
	 NETIF_F_NTUPLE)

const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.is_vf = true,
	.mem_bar = EFX_MEM_VF_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_vf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_vf,
	.start_stats = efx_port_dummy_op_void,
	.pull_stats = efx_port_dummy_op_void,
	.stop_stats = efx_port_dummy_op_void,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol_vf,
	.set_wol = efx_ef10_set_wol_vf,
	.resume_wol = efx_port_dummy_op_void,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_port_dummy_op_int,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
	.vswitching_probe = efx_ef10_vswitching_probe_vf,
	.vswitching_restore = efx_ef10_vswitching_restore_vf,
	.vswitching_remove = efx_ef10_vswitching_remove_vf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_vf,
	.set_mac_address = efx_ef10_set_mac_address,

	.get_phys_port_id = efx_ef10_get_phys_port_id,
	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
	.rx_hash_key_size = 40,
};
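
/* Note the deliberate differences from the PF table below: the VF maps
 * the VF BAR, uses the _vf variants for stats, MAC, WoL and PTP
 * handling, has no MTD or UDP tunnel methods of its own, and supports
 * MSI-X interrupts only.
 */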

const struct efx_nic_type efx_hunt_a0_nic_type = {
	.is_vf = false,
	.mem_bar = EFX_MEM_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_pf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_pf,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
	.rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
	.udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
	.udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
	.udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
	.udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef10_sriov_configure,
	.sriov_init = efx_ef10_sriov_init,
	.sriov_fini = efx_ef10_sriov_fini,
	.sriov_wanted = efx_ef10_sriov_wanted,
	.sriov_reset = efx_ef10_sriov_reset,
	.sriov_flr = efx_ef10_sriov_flr,
	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
	.vswitching_probe = efx_ef10_vswitching_probe_pf,
	.vswitching_restore = efx_ef10_vswitching_restore_pf,
	.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_pf,
	.set_mac_address = efx_ef10_set_mac_address,
	.tso_versions = efx_ef10_tso_versions,

	.get_phys_port_id = efx_ef10_get_phys_port_id,
	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.option_descriptors = true,
	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
	.rx_hash_key_size = 40,
};
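
/* How these efx_nic_type tables are consumed (illustrative only): the
 * PCI probe path binds a device ID to one of them via driver_data and
 * thereafter calls through the method pointers.  A sketch of such a
 * table entry, with the device ID shown purely as an example:
 *
 *	static const struct pci_device_id efx_pci_table[] = {
 *		{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),
 *		 .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
 *		{0}
 *	};
 */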