/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
#define I40E_ITR_100K              0x0005
#define I40E_ITR_50K               0x000A
#define I40E_ITR_20K               0x0019
#define I40E_ITR_18K               0x001B
#define I40E_ITR_8K                0x003E
#define I40E_ITR_4K                0x007A
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF            (ITR_REG_TO_USEC(I40E_ITR_20K) | \
				    I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF            (ITR_REG_TO_USEC(I40E_ITR_20K) | \
				    I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK      256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
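
/* Worked example: I40E_ITR_20K is 0x19 (25 register units), so
 * ITR_REG_TO_USEC(I40E_ITR_20K) = 25 << 1 = 50 usec between interrupts,
 * i.e. 1000000 / 50 = 20000 interrupts/sec.
 */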
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 **/
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
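
/* Example: I40E_INTRL_8K is 125 usec; i40e_intrl_usec_to_reg(125) yields
 * (125 >> 2) | INTRL_ENA = 0x1f | 0x40 = 0x5f, and INTRL_REG_TO_USEC(0x5f)
 * decodes back to 124 usec (the 4 usec resolution drops the remainder).
 */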
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}
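
/* Example (assuming 4K pages and a 320-byte skb_shared_info after
 * cache-line alignment, typical with 64-byte cache lines):
 * i40e_compute_pad(1536) -> page_size = ALIGN(1536, 2048) = 2048,
 * pad_size = (2048 - 320) - 1536 = 192 bytes of headroom.
 */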
static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}
#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 **/
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
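
/* Typical use is a boolean test against a single status bit, e.g. the
 * descriptor-done bit declared in i40e_type.h:
 *	if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 */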
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
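
/* With the values above, I40E_MAX_DATA_PER_TXD_ALIGNED works out to
 * (16384 - 1) & ~(4096 - 1) = 12288, i.e. 12K per aligned descriptor.
 */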
/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 **/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
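
/* Worked example: i40e_txd_use_count(3000)  = ((3000 * 85) >> 20) + 1 = 1,
 * while i40e_txd_use_count(24000) = ((24000 * 85) >> 20) + 1 = 2.
 */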

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
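
/* e.g. the 802.1p priority is recovered from tx_flags with:
 * (tx_flags & I40E_TX_FLAGS_VLAN_PRIO_MASK) >> I40E_TX_FLAGS_VLAN_PRIO_SHIFT
 */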

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	unsigned long last_itr_update;	/* jiffies of last ITR update */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
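
/* e.g. with 4K pages, a 3K buffer (I40E_RXBUFFER_3072) exceeds half a page,
 * so i40e_rx_pg_order() returns 1 and i40e_rx_pg_size() is 8192.
 */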

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
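
/* The write-back slot is the u32 immediately past the last descriptor;
 * the ring allocation in i40e_setup_tx_descriptors() is sized to include
 * this extra u32.
 */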

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
 * there are not enough descriptors available in this ring since we need at least
 * one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
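
/* Callers typically reserve the worst case up front, e.g.
 * i40e_maybe_stop_tx(tx_ring, DESC_NEEDED).
 */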

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */