/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif

#include <net/busy_poll.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		512
#define IXGBE_DEFAULT_TX_WORK		256
#define IXGBE_MAX_TXD			4096
#define IXGBE_MIN_TXD			64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		512
#else
#define IXGBE_DEFAULT_RXD		128
#endif
#define IXGBE_MAX_RXD			4096
#define IXGBE_MIN_RXD			64

#define IXGBE_ETH_P_LLDP		0x88CC

/* flow control */
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF
87 /* Supported Rx Buffer Sizes */
88 #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
89 #define IXGBE_RXBUFFER_1536 1536
90 #define IXGBE_RXBUFFER_2K 2048
91 #define IXGBE_RXBUFFER_3K 3072
92 #define IXGBE_RXBUFFER_4K 4096
93 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))

static inline int ixgbe_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IXGBE_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ixgbe_compute_pad(rx_buf_len);
}
#define IXGBE_SKB_PAD	ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif
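/* Illustrative walk-through (not from the original header), assuming the
 * common x86-64 case of 64-byte cache lines, NET_SKB_PAD = 64,
 * NET_IP_ALIGN = 2 and a 320-byte skb_shared_info:
 *
 *	SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728
 *	NET_SKB_PAD + 1536      = 1600 <= 1728
 *
 * so the 2K buffer is large enough and IXGBE_2K_TOO_SMALL_WITH_PADDING
 * evaluates false.  Then rx_buf_len = 1536 - 2 = 1534, and in
 * ixgbe_compute_pad() page_size = ALIGN(1534, 2048) = 2048, giving
 * IXGBE_SKB_PAD = 1728 - 1534 = 194 bytes of headroom.  With 256-byte
 * cache lines the shared info grows past the spare room, the subtraction
 * goes negative, and the 3K fallback applies as noted above.
 */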
/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x40,
	IXGBE_TX_FLAGS_FCOE	= 0x80,
};
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define VMDQ_P(p)	((p) + adapter->ring_feature[RING_F_VMDQ].offset)
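/* Usage sketch (illustrative, not from the original header): VMDQ_P()
 * turns a pool-relative index into an absolute VMDq pool number.  If,
 * say, ring_feature[RING_F_VMDQ].offset were 32, VMDQ_P(0) would resolve
 * to pool 32, the default pool owned by the PF.
 */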
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515
struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
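/* Worked example (illustrative, not from the original header): each data
 * descriptor can carry at most IXGBE_MAX_DATA_PER_TXD = 2^14 = 16384
 * bytes, so a single 60000-byte chunk of linear data needs
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4 descriptors.
 * DESC_NEEDED budgets one descriptor per possible fragment plus slack
 * for the skb head and a context descriptor.
 */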
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		/* XDP uses address ptr on irq_clean */
		void *data;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8
enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,
};
#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct ixgbe_adapter *real_adapter;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};
#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device for DMA mapping */
	struct ixgbe_fwd_adapter *l2_accel_priv;
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,	/* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */
	RING_F_ARRAY_SIZE	/* must be last in enum set */
};
#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		31
#define IXGBE_MAX_DCBMACVLANS		8

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
	if (ring_uses_build_skb(ring))
		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
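/* Example (illustrative, not from the original header): on a system with
 * 4K pages, a ring flagged __IXGBE_RX_3K_BUFFER gets ixgbe_rx_pg_order()
 * == 1, so ixgbe_rx_pg_size() == 8192 and each half of the order-1 page
 * can back one 3K (FCoE-sized) buffer.  With 8K or larger pages the
 * order stays 0, since a single page is already big enough to split.
 */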
#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
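/* Usage sketch (illustrative, not from the original header): walk every
 * ring attached to a q_vector's Tx container, e.g.
 *
 *	struct ixgbe_ring *ring;
 *
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		do_something(ring);
 *
 * The container's ring member heads a singly linked list chained through
 * each ring's next pointer.
 */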
#define MAX_RX_PACKET_BUFFERS	((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
				 ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS	MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	/* CPU for DCA */
#endif
	u16 v_idx;	/* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring */
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
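/* Worked arithmetic (illustrative, not from the original header): the
 * register value is the interrupt interval in usecs shifted left by 2.
 * IXGBE_20K_ITR = 200 -> 200 >> 2 = 50 usecs between interrupts, i.e.
 * 1000000 / 50 = 20000 interrupts/sec.  Likewise IXGBE_12K_ITR = 336 ->
 * 84 usecs -> ~11900 interrupts/sec, and IXGBE_100K_ITR = 40 -> 10 usecs
 * -> 100000 interrupts/sec.
 */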
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
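/* Usage sketch (illustrative, not from the original header): the helper
 * avoids a per-descriptor byte swap by converting the constant rather
 * than the descriptor field, e.g.
 *
 *	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;	/+ descriptor not yet written back by hardware +/
 */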
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
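/* Worked example (illustrative, not from the original header): with
 * count = 512, ntu = 500 and ntc = 10 the ring has wrapped, so
 * 512 + 10 - 500 - 1 = 21 descriptors are free.  When ntc == ntu the
 * result is count - 1: one slot is always left unused so that a full
 * ring can be told apart from an empty one.
 */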
#define IXGBE_RX_DESC(R, i)	\
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	\
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	\
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
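/* Usage sketch (illustrative, not from the original header): the macros
 * simply index the raw descriptor memory with the appropriate type, e.g.
 *
 *	union ixgbe_adv_rx_desc *rx_desc;
 *
 *	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */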
#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT	(4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES	(2 * HZ)	/* SFP poll every 2 seconds */
/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE	BIT(28)
	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED	BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;
	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	/* Test rings */
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;
	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);
	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /* CONFIG_DEBUG_FS */

	u8 default_up;
	unsigned long fwd_bitmask;	/* Bitmask indicating in use pools */

#define IXGBE_MAX_LINK_HANDLE	10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

	/* maximum number of RETA entries among all devices supported by ixgbe
	 * driver: currently it's x550 device in non-SRIOV mode
	 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE	40	/* size of RSS Hash Key in bytes */
	u32 *rss_key;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u64 action;
};
enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
	__IXGBE_RESET_REQUESTED,
};
struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}
int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#endif /* _IXGBE_H_ */