/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
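/* Worked example (values illustrative only): a 60-byte dummy frame
 * terminated with EOP and RS, with no offload offsets or VLAN tag, would
 * be encoded as
 *
 *	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TXD_CMD, 0, 60, 0);
 *
 * i.e. DTYPE_DATA in the low bits, the command flags above them, 60 in the
 * buffer-size field and 0 in the L2TAG1 field, all converted to
 * little-endian for the hardware.
 */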
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
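/* A single filter operation therefore consumes two descriptors on the FDIR
 * ring: the filter program descriptor written by i40e_fdir() and the data
 * descriptor above that carries the dummy packet. The wmb() makes both
 * globally visible before the tail write tells the hardware to fetch them,
 * which is why the wait loop at the top demands at least two free slots.
 */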
#define IP_HEADER_OFFSET		14
#define I40E_UDPIP_DUMMY_PACKET_LEN	42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}
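/* Illustrative caller setup (field values assumed, not from a real rule):
 * steering UDP 192.168.0.1:319 -> 192.168.0.2:320 would pass a filter
 * roughly like
 *
 *	struct i40e_fdir_filter fd_data = {
 *		.src_ip   = cpu_to_be32(0xc0a80001),
 *		.dst_ip   = cpu_to_be32(0xc0a80002),
 *		.src_port = cpu_to_be16(319),
 *		.dst_port = cpu_to_be16(320),
 *	};
 *
 * The fields are already big-endian, which is why they are copied into the
 * dummy iphdr/udphdr above without further conversion.
 */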
#define I40E_TCPIP_DUMMY_PACKET_LEN	54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}
#define I40E_SCTPIP_DUMMY_PACKET_LEN	46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN	34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}
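/* Usage sketch (interface name and values illustrative only): these filters
 * normally originate from the ethtool ntuple interface, e.g. something like
 *
 *	ethtool -N eth0 flow-type udp4 dst-ip 192.168.0.2 dst-port 320 \
 *		action 4
 *
 * which the ethtool layer translates into the i40e_fdir_filter handed in
 * here.
 */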
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
				pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			page_frag_free(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
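/* Worked example of the wrap handling above: with ring->count = 512,
 * head = 500 and tail = 10 the ring has wrapped, so pending work is
 * 10 + 512 - 500 = 22 descriptors; with head = 10 and tail = 500 it is
 * simply 500 - 10 = 490.
 */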
/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * The VSI has a netdev and the netdev has Tx queues. This function checks
 * each of those Tx queues and, if it appears hung, triggers recovery by
 * issuing a SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			    i40e_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_usec;
	unsigned int usecs, estimated_usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_usec = rc->total_bytes / usecs;

	/* The calculations in this algorithm depend on interrupts actually
	 * firing at the ITR rate. This may not happen if the packet rate is
	 * really low, or if we've been napi polling. Check to make sure
	 * that's not the case before we continue.
	 */
	estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
	if (estimated_usecs > usecs) {
		new_latency_range = I40E_LOW_LATENCY;
		goto reset_latency;
	}

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_usec > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_usec > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_usec <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	default:
		if (bytes_per_usec <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

reset_latency:
	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;
	rc->last_itr_update = jiffies;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
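/* Worked example (numbers assumed for illustration): if the smoothed window
 * "usecs" comes out at 100 us and 1500 bytes were counted in it,
 * bytes_per_usec = 15, i.e. roughly 15 MB/s. That is above the 10 MB/s
 * threshold, so a ring sitting in I40E_LOWEST_LATENCY is promoted to
 * I40E_LOW_LATENCY and its ITR is reprogrammed to I40E_ITR_20K on the next
 * update.
 */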
/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qw: qword representing status_error_len in CPU ordering
 *
 * The value in the descriptor length field indicates whether this is a
 * programming status descriptor for flow director or FCoE: such descriptors
 * carry I40E_RX_PROG_STATUS_DESC_LENGTH in that field, otherwise this is a
 * packet descriptor.
 **/
static inline bool i40e_rx_is_programming_status(u64 qw)
{
	/* The Rx filter programming status and SPH bit occupy the same
	 * spot in the descriptor. Since we don't support packet split we
	 * can just reuse the bit as an indication that this is a
	 * programming status descriptor.
	 */
	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 * @qw: qword representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc,
					  u64 qw)
{
	struct i40e_rx_buffer *rx_buffer;
	u32 ntc = rx_ring->next_to_clean;
	u8 id;

	/* fetch, update, and store next to clean */
	rx_buffer = &rx_ring->rx_bi[ntc++];
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* place unused page back on the ring */
	i40e_reuse_rx_page(rx_ring, rx_buffer);
	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
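/* Sizing example (assuming a ring of 512 descriptors): tx_ring->size is
 * 512 * sizeof(struct i40e_tx_desc) = 512 * 16 = 8192 bytes, plus 4 bytes
 * for the head writeback slot = 8196, which ALIGN(, 4096) rounds up to
 * 12288 bytes of coherent DMA memory.
 */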
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int err = -ENOMEM;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index);
		if (err < 0)
			goto err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return err;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}
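/* Note on pagecnt_bias: it counts the page references the driver still
 * "owns" as opposed to those handed up the stack. It starts at 1 here, is
 * decremented whenever i40e_get_rx_buffer() hands the buffer out, and is
 * compared against page_count() in i40e_can_reuse_rx_page() to decide
 * whether the rest of the page is free to be re-armed for receive.
 */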
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}
/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 * @rx_desc: pointer to the EOP Rx descriptor
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
				 union i40e_rx_desc *rx_desc)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* ERR_MASK will only have valid bits if EOP set, and
	 * what we are doing here is actually checking
	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
	 * the error field
	 */
	if (unlikely(i40e_test_staterr(rx_desc,
				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 **/
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}
/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
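/* The (page_count - pagecnt_bias) > 1 test works because every reference
 * the driver owns is accounted for in pagecnt_bias: if the stack has
 * already freed the half-page it was given, the raw page count exceeds the
 * bias by exactly one and the buffer can flip to the other half; if the
 * stack still holds its reference, the difference is larger than one and
 * the page must not be reused.
 */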
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
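/* Worked example (4K pages assumed): truesize is PAGE_SIZE / 2 = 2048, so
 * page_offset ^= truesize alternates the buffer between offsets 0 and 2048;
 * one half is owned by the skb just filled while the other half is re-armed
 * for the hardware.
 */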
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb.  It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 **/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 **/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data + L1_CACHE_BYTES);
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, I40E_SKB_PAD);
	__skb_put(skb, size);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}
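/* Layout note (interpretation, not from the datasheet): build_skb() expects
 * headroom and tailroom to already be in place, so on large-page systems
 * truesize covers I40E_SKB_PAD of headroom, the received data, and a
 * trailing skb_shared_info; on 4K-page systems the fixed half-page the
 * hardware wrote into already provides that layout.
 */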
/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 **/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *rx_buffer)
{
	if (i40e_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}
/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
#define I40E_XDP_PASS		0
#define I40E_XDP_CONSUMED	1
#define I40E_XDP_TX		2

static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
			      struct i40e_ring *xdp_ring);
/**
 * i40e_run_xdp - run an XDP program
 * @rx_ring: Rx ring being processed
 * @xdp: XDP buffer containing the frame
 **/
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
				    struct xdp_buff *xdp)
{
	int result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}
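/* The XDP verdict is folded into the returned pointer: XDP_PASS yields
 * ERR_PTR(0), i.e. NULL, so the caller goes on to build an skb, while
 * I40E_XDP_TX and I40E_XDP_CONSUMED come back as error pointers that
 * i40e_clean_rx_irq() unpacks with PTR_ERR() to choose between flipping
 * the buffer past the transmitted frame and dropping the page reference.
 */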
2098 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
2100 * @rx_buffer: Rx buffer to adjust
2101 * @size: Size of adjustment
2103 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2104 struct i40e_rx_buffer *rx_buffer,
2107 #if (PAGE_SIZE < 8192)
2108 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2110 rx_buffer->page_offset ^= truesize;
2111 #else
2112 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2114 rx_buffer->page_offset += truesize;
2115 #endif
2116 }
2119 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2120 * @rx_ring: rx descriptor ring to transact packets on
2121 * @budget: Total limit on number of packets to process
2123 * This function provides a "bounce buffer" approach to Rx interrupt
2124 * processing. The advantage to this approach is that on systems with
2125 * expensive IOMMU overhead it avoids repeated map/unmap calls by
2126 * keeping pages mapped and recycling them between hardware and stack.
2128 * Returns amount of work completed
2130 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2132 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2133 struct sk_buff *skb = rx_ring->skb;
2134 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2135 bool failure = false, xdp_xmit = false;
2136 struct xdp_buff xdp;
2138 xdp.rxq = &rx_ring->xdp_rxq;
2140 while (likely(total_rx_packets < (unsigned int)budget)) {
2141 struct i40e_rx_buffer *rx_buffer;
2142 union i40e_rx_desc *rx_desc;
2148 /* return some buffers to hardware, one at a time is too slow */
2149 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2150 failure = failure ||
2151 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2155 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2157 /* status_error_len will always be zero for unused descriptors
2158 * because it's cleared in cleanup and overlaps with hdr_addr,
2159 * which is always zero because packet split isn't used. If the
2160 * hardware wrote DD then the length will be non-zero.
2162 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2164 /* This memory barrier is needed to keep us from reading
2165 * any other fields out of the rx_desc until we have
2166 * verified the descriptor has been written back.
2167 */
2168 dma_rmb();
2170 if (unlikely(i40e_rx_is_programming_status(qword))) {
2171 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2175 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2176 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2180 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2181 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2183 /* retrieve a buffer from the ring */
2185 xdp.data = page_address(rx_buffer->page) +
2186 rx_buffer->page_offset;
2187 xdp_set_data_meta_invalid(&xdp);
2188 xdp.data_hard_start = xdp.data -
2189 i40e_rx_offset(rx_ring);
2190 xdp.data_end = xdp.data + size;
2192 skb = i40e_run_xdp(rx_ring, &xdp);
2196 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2198 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2200 rx_buffer->pagecnt_bias++;
2202 total_rx_bytes += size;
2205 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2206 } else if (ring_uses_build_skb(rx_ring)) {
2207 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2209 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2212 /* exit if we failed to retrieve a buffer */
2213 if (!skb) {
2214 rx_ring->rx_stats.alloc_buff_failed++;
2215 rx_buffer->pagecnt_bias++;
2219 i40e_put_rx_buffer(rx_ring, rx_buffer);
2222 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2225 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2230 /* probably a little skewed due to removing CRC */
2231 total_rx_bytes += skb->len;
2233 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2234 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2235 I40E_RXD_QW1_PTYPE_SHIFT;
2237 /* populate checksum, VLAN, and protocol */
2238 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2240 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2241 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2243 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2244 i40e_receive_skb(rx_ring, skb, vlan_tag);
2247 /* update budget accounting */
2248 total_rx_packets++;
2249 }
2251 if (xdp_xmit) {
2252 struct i40e_ring *xdp_ring;
2254 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2256 /* Force memory writes to complete before letting h/w
2257 * know there are new descriptors to fetch.
2258 */
2260 wmb();
2261 writel(xdp_ring->next_to_use, xdp_ring->tail);
2266 u64_stats_update_begin(&rx_ring->syncp);
2267 rx_ring->stats.packets += total_rx_packets;
2268 rx_ring->stats.bytes += total_rx_bytes;
2269 u64_stats_update_end(&rx_ring->syncp);
2270 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2271 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2273 /* guarantee a trip back through this routine if there was a failure */
2274 return failure ? budget : (int)total_rx_packets;
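/* Note (editorial): returning the full budget when an allocation
 * failed tells the NAPI core the ring is not yet clean, guaranteeing
 * another poll in which i40e_alloc_rx_buffers() is retried; otherwise
 * the number of packets actually cleaned is reported.
 */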
2277 static u32 i40e_buildreg_itr(const int type, const u16 itr)
2281 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2282 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2283 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2284 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
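/* Worked example (editorial, assuming the hardware's 2 usec ITR
 * interval granularity): arming the Rx ITR at roughly 20K interrupts
 * per second means type = I40E_RX_ITR and itr = 25 (25 * 2 usec =
 * 50 usec), producing INTENA | CLEARPBA | (0 << ITR_INDX_SHIFT) |
 * (25 << INTERVAL_SHIFT).
 */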
2289 /* a small macro to shorten up some long lines */
2290 #define INTREG I40E_PFINT_DYN_CTLN
2291 static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
2293 return vsi->rx_rings[idx]->rx_itr_setting;
2296 static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
2298 return vsi->tx_rings[idx]->tx_itr_setting;
2302 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2303 * @vsi: the VSI we care about
2304 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2307 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2308 struct i40e_q_vector *q_vector)
2310 struct i40e_hw *hw = &vsi->back->hw;
2311 bool rx = false, tx = false;
2313 int idx = q_vector->v_idx;
2314 int rx_itr_setting, tx_itr_setting;
2316 /* If we don't have MSIX, then we only need to re-enable icr0 */
2317 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2318 i40e_irq_dynamic_enable_icr0(vsi->back);
2322 /* avoid dynamic calculation if in countdown mode OR if
2323 * all dynamic is disabled
2325 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2327 rx_itr_setting = get_rx_itr(vsi, idx);
2328 tx_itr_setting = get_tx_itr(vsi, idx);
2330 if (q_vector->itr_countdown > 0 ||
2331 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2332 !ITR_IS_DYNAMIC(tx_itr_setting))) {
2336 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2337 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2338 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2341 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2342 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2343 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
2347 /* get the higher of the two ITR adjustments and
2348 * use the same value for both ITR registers
2349 * when in adaptive mode (Rx and/or Tx)
2351 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2353 q_vector->tx.itr = q_vector->rx.itr = itr;
2354 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2356 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2360 /* only need to enable the interrupt once, but need
2361 * to possibly update both ITR values
2364 /* set the INTENA_MSK_MASK so that this first write
2365 * won't actually enable the interrupt, instead just
2366 * updating the ITR (bit 31 on both PF and VF)
2367 */
2368 rxval |= BIT(31);
2369 /* don't check _DOWN because interrupt isn't being enabled */
2370 wr32(hw, INTREG(q_vector->reg_idx), rxval);
2374 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2375 wr32(hw, INTREG(q_vector->reg_idx), txval);
2377 if (q_vector->itr_countdown)
2378 q_vector->itr_countdown--;
2380 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2384 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2385 * @napi: napi struct with our device's info in it
2386 * @budget: amount of work driver is allowed to do this pass, in packets
2388 * This function will clean all queues associated with a q_vector.
2390 * Returns the amount of work done
2392 int i40e_napi_poll(struct napi_struct *napi, int budget)
2394 struct i40e_q_vector *q_vector =
2395 container_of(napi, struct i40e_q_vector, napi);
2396 struct i40e_vsi *vsi = q_vector->vsi;
2397 struct i40e_ring *ring;
2398 bool clean_complete = true;
2399 bool arm_wb = false;
2400 int budget_per_ring;
2403 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2404 napi_complete(napi);
2405 return 0;
2406 }
2408 /* Since the actual Tx work is minimal, we can give the Tx a larger
2409 * budget and be more aggressive about cleaning up the Tx descriptors.
2411 i40e_for_each_ring(ring, q_vector->tx) {
2412 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2413 clean_complete = false;
2416 arm_wb |= ring->arm_wb;
2417 ring->arm_wb = false;
2420 /* Handle case where we are called by netpoll with a budget of 0 */
2421 if (budget <= 0)
2422 goto tx_only;
2424 /* We attempt to distribute budget to each Rx queue fairly, but don't
2425 * allow the budget to go below 1 because that would exit polling early.
2427 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2429 i40e_for_each_ring(ring, q_vector->rx) {
2430 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2432 work_done += cleaned;
2433 /* if we clean as many as budgeted, we must not be done */
2434 if (cleaned >= budget_per_ring)
2435 clean_complete = false;
2438 /* If work not completed, return budget and polling will return */
2439 if (!clean_complete) {
2440 int cpu_id = smp_processor_id();
2442 /* It is possible that the interrupt affinity has changed, but
2443 * if the cpu is pegged at 100%, polling will never exit while
2444 * traffic continues and the interrupt will be stuck on this
2445 * cpu. We check to make sure affinity is correct before we
2446 * continue to poll, otherwise we must stop polling so the
2447 * interrupt can move to the correct cpu.
2449 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2450 /* Tell napi that we are done polling */
2451 napi_complete_done(napi, work_done);
2453 /* Force an interrupt */
2454 i40e_force_wb(vsi, q_vector);
2456 /* Return budget-1 so that polling stops */
2457 return budget - 1;
2458 }
2459 tx_only:
2460 if (arm_wb) {
2461 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2462 i40e_enable_wb_on_itr(vsi, q_vector);
2463 }
2464 return budget;
2465 }
2467 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2468 q_vector->arm_wb_state = false;
2470 /* Work is done so exit the polling mode and re-enable the interrupt */
2471 napi_complete_done(napi, work_done);
2473 i40e_update_enable_itr(vsi, q_vector);
2475 return min(work_done, budget - 1);
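/* Note (editorial): napi_complete_done() was already called above, so
 * at most budget - 1 may be returned here; reporting the full budget
 * would tell the NAPI core that more work remains on a queue that was
 * just declared done.
 */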
2479 * i40e_atr - Add a Flow Director ATR filter
2480 * @tx_ring: ring to add programming descriptor to
2482 * @tx_flags: send tx flags
2484 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2487 struct i40e_filter_program_desc *fdir_desc;
2488 struct i40e_pf *pf = tx_ring->vsi->back;
2490 unsigned char *network;
2492 struct ipv6hdr *ipv6;
2496 u32 flex_ptype, dtype_cmd;
2500 /* make sure ATR is enabled */
2501 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2504 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
2507 /* if sampling is disabled do nothing */
2508 if (!tx_ring->atr_sample_rate)
2511 /* Currently only IPv4/IPv6 with TCP is supported */
2512 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2515 /* snag network header to get L4 type and address */
2516 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2517 skb_inner_network_header(skb) : skb_network_header(skb);
2519 /* Note: tx_flags gets modified to reflect inner protocols in
2520 * tx_enable_csum function if encap is enabled.
2522 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2523 /* access ihl as u8 to avoid unaligned access on ia64 */
2524 hlen = (hdr.network[0] & 0x0F) << 2;
2525 l4_proto = hdr.ipv4->protocol;
2527 /* find the start of the innermost ipv6 header */
2528 unsigned int inner_hlen = hdr.network - skb->data;
2529 unsigned int h_offset = inner_hlen;
2531 /* this function updates h_offset to the end of the header */
2533 l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2534 /* hlen will contain the length of the ipv6 header chain */
2535 hlen = h_offset - inner_hlen;
2538 if (l4_proto != IPPROTO_TCP)
2541 th = (struct tcphdr *)(hdr.network + hlen);
2543 /* Due to lack of space, no more new filters can be programmed */
2544 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
2546 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2547 /* HW ATR eviction will take care of removing filters on FIN
2548 * and RST packets.
2549 */
2550 if (th->fin || th->rst)
2554 tx_ring->atr_count++;
2556 /* sample on all syn/fin/rst packets or once every atr sample rate */
2557 if (!th->fin &&
2558 !th->syn &&
2559 !th->rst &&
2560 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2561 return;
2563 tx_ring->atr_count = 0;
2565 /* grab the next descriptor */
2566 i = tx_ring->next_to_use;
2567 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2570 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2572 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2573 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2574 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2575 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2576 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2577 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2578 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2580 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2582 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2584 dtype_cmd |= (th->fin || th->rst) ?
2585 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2586 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2587 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2588 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2590 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2591 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2593 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2594 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2596 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2597 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2598 dtype_cmd |=
2599 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2600 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2601 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2602 else
2603 dtype_cmd |=
2604 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2605 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2606 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2608 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2609 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2611 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2612 fdir_desc->rsvd = cpu_to_le32(0);
2613 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2614 fdir_desc->fd_id = cpu_to_le32(0);
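/* Note (editorial): the programming descriptor built above teaches the
 * hardware a flow-to-queue binding from observed Tx traffic, so the Rx
 * side of the same TCP flow is steered to the queue (and CPU) that
 * transmitted it; FIN/RST samples remove the binding again.
 */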
2618 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2620 * @tx_ring: ring to send buffer on
2621 * @flags: the tx flags to be set
2623 * Checks the skb and sets up correspondingly several generic transmit flags
2624 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2626 * Returns an error code to indicate the frame should be dropped upon error,
2627 * otherwise returns 0 to indicate the flags have been set properly.
2629 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2630 struct i40e_ring *tx_ring,
2633 __be16 protocol = skb->protocol;
2636 if (protocol == htons(ETH_P_8021Q) &&
2637 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2638 /* When HW VLAN acceleration is turned off by the user the
2639 * stack sets the protocol to 8021q so that the driver
2640 * can take any steps required to support the SW only
2641 * VLAN handling. In our case the driver doesn't need
2642 * to take any further steps so just set the protocol
2643 * to the encapsulated ethertype.
2645 skb->protocol = vlan_get_protocol(skb);
2649 /* if we have a HW VLAN tag being added, default to the HW one */
2650 if (skb_vlan_tag_present(skb)) {
2651 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2652 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2653 /* else if it is a SW VLAN, check the next protocol and store the tag */
2654 } else if (protocol == htons(ETH_P_8021Q)) {
2655 struct vlan_hdr *vhdr, _vhdr;
2657 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2661 protocol = vhdr->h_vlan_encapsulated_proto;
2662 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2663 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2666 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2669 /* Insert 802.1p priority into VLAN header */
2670 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2671 (skb->priority != TC_PRIO_CONTROL)) {
2672 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2673 tx_flags |= (skb->priority & 0x7) <<
2674 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2675 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2676 struct vlan_ethhdr *vhdr;
2679 rc = skb_cow_head(skb, 0);
2682 vhdr = (struct vlan_ethhdr *)skb->data;
2683 vhdr->h_vlan_TCI = htons(tx_flags >>
2684 I40E_TX_FLAGS_VLAN_SHIFT);
2686 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
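/* Worked example (editorial): an skb with priority 5 on a SW VLAN with
 * VID 100 produces TCI = (5 << VLAN_PRIO_SHIFT) | 100 = 0xa064, written
 * back into the packet header above; with HW offload the same tag
 * instead rides in tx_flags and is inserted by hardware via L2TAG1.
 */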
2696 * i40e_tso - set up the tso context descriptor
2697 * @first: pointer to first Tx buffer for xmit
2698 * @hdr_len: ptr to the size of the packet header
2699 * @cd_type_cmd_tso_mss: Quad Word 1
2701 * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
2703 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2704 u64 *cd_type_cmd_tso_mss)
2706 struct sk_buff *skb = first->skb;
2707 u64 cd_cmd, cd_tso_len, cd_mss;
2718 u32 paylen, l4_offset;
2719 u16 gso_segs, gso_size;
2722 if (skb->ip_summed != CHECKSUM_PARTIAL)
2725 if (!skb_is_gso(skb))
2728 err = skb_cow_head(skb, 0);
2732 ip.hdr = skb_network_header(skb);
2733 l4.hdr = skb_transport_header(skb);
2735 /* initialize outer IP header fields */
2736 if (ip.v4->version == 4) {
2740 ip.v6->payload_len = 0;
2743 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2747 SKB_GSO_UDP_TUNNEL |
2748 SKB_GSO_UDP_TUNNEL_CSUM)) {
2749 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2750 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2753 /* determine offset of outer transport header */
2754 l4_offset = l4.hdr - skb->data;
2756 /* remove payload length from outer checksum */
2757 paylen = skb->len - l4_offset;
2758 csum_replace_by_diff(&l4.udp->check,
2759 (__force __wsum)htonl(paylen));
2762 /* reset pointers to inner headers */
2763 ip.hdr = skb_inner_network_header(skb);
2764 l4.hdr = skb_inner_transport_header(skb);
2766 /* initialize inner IP header fields */
2767 if (ip.v4->version == 4) {
2771 ip.v6->payload_len = 0;
2775 /* determine offset of inner transport header */
2776 l4_offset = l4.hdr - skb->data;
2778 /* remove payload length from inner checksum */
2779 paylen = skb->len - l4_offset;
2780 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2782 /* compute length of segmentation header */
2783 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2785 /* pull values out of skb_shinfo */
2786 gso_size = skb_shinfo(skb)->gso_size;
2787 gso_segs = skb_shinfo(skb)->gso_segs;
2789 /* update GSO size and bytecount with header size */
2790 first->gso_segs = gso_segs;
2791 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2793 /* find the field values */
2794 cd_cmd = I40E_TX_CTX_DESC_TSO;
2795 cd_tso_len = skb->len - *hdr_len;
2796 cd_mss = gso_size;
2797 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2798 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2799 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
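/* Worked example (editorial): a 7306 byte TSO skb with a 66 byte
 * header and gso_size 1448 gives cd_tso_len = 7240, cd_mss = 1448 and
 * gso_segs = 5; the shifts above pack command, TSO length and MSS into
 * the context descriptor's quad word 1.
 */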
2804 * i40e_tsyn - set up the tsyn context descriptor
2805 * @tx_ring: ptr to the ring to send
2806 * @skb: ptr to the skb we're sending
2807 * @tx_flags: the collected send information
2808 * @cd_type_cmd_tso_mss: Quad Word 1
2810 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2812 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2813 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2817 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2820 /* Tx timestamps cannot be sampled when doing TSO */
2821 if (tx_flags & I40E_TX_FLAGS_TSO)
2824 /* only timestamp the outbound packet if the user has requested it and
2825 * we are not already transmitting a packet to be timestamped
2827 pf = i40e_netdev_to_pf(tx_ring->netdev);
2828 if (!(pf->flags & I40E_FLAG_PTP))
2829 return 0;
2831 if (pf->ptp_tx &&
2832 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2833 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2834 pf->ptp_tx_start = jiffies;
2835 pf->ptp_tx_skb = skb_get(skb);
2836 } else {
2837 pf->tx_hwtstamp_skipped++;
2838 return 0;
2839 }
2841 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2842 I40E_TXD_CTX_QW1_CMD_SHIFT;
2848 * i40e_tx_enable_csum - Enable Tx checksum offloads
2850 * @tx_flags: pointer to Tx flags currently set
2851 * @td_cmd: Tx descriptor command bits to set
2852 * @td_offset: Tx descriptor header offsets to set
2853 * @tx_ring: Tx descriptor ring
2854 * @cd_tunneling: ptr to context desc bits
2856 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2857 u32 *td_cmd, u32 *td_offset,
2858 struct i40e_ring *tx_ring,
2871 unsigned char *exthdr;
2872 u32 offset, cmd = 0;
2876 if (skb->ip_summed != CHECKSUM_PARTIAL)
2879 ip.hdr = skb_network_header(skb);
2880 l4.hdr = skb_transport_header(skb);
2882 /* compute outer L2 header size */
2883 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2885 if (skb->encapsulation) {
2887 /* define outer network header type */
2888 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2889 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2890 I40E_TX_CTX_EXT_IP_IPV4 :
2891 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2893 l4_proto = ip.v4->protocol;
2894 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2895 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2897 exthdr = ip.hdr + sizeof(*ip.v6);
2898 l4_proto = ip.v6->nexthdr;
2899 if (l4.hdr != exthdr)
2900 ipv6_skip_exthdr(skb, exthdr - skb->data,
2901 &l4_proto, &frag_off);
2904 /* define outer transport */
2907 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2908 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2911 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2912 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2916 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2917 l4.hdr = skb_inner_network_header(skb);
2920 if (*tx_flags & I40E_TX_FLAGS_TSO)
2923 skb_checksum_help(skb);
2927 /* compute outer L3 header size */
2928 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2929 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2931 /* switch IP header pointer from outer to inner header */
2932 ip.hdr = skb_inner_network_header(skb);
2934 /* compute tunnel header size */
2935 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2936 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2938 /* indicate if we need to offload outer UDP header */
2939 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2940 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2941 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2942 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2944 /* record tunnel offload values */
2945 *cd_tunneling |= tunnel;
2947 /* switch L4 header pointer from outer to inner */
2948 l4.hdr = skb_inner_transport_header(skb);
2951 /* reset type as we transition from outer to inner headers */
2952 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2953 if (ip.v4->version == 4)
2954 *tx_flags |= I40E_TX_FLAGS_IPV4;
2955 if (ip.v6->version == 6)
2956 *tx_flags |= I40E_TX_FLAGS_IPV6;
2959 /* Enable IP checksum offloads */
2960 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2961 l4_proto = ip.v4->protocol;
2962 /* the stack computes the IP header already, the only time we
2963 * need the hardware to recompute it is in the case of TSO.
2965 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2966 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2967 I40E_TX_DESC_CMD_IIPT_IPV4;
2968 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2969 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2971 exthdr = ip.hdr + sizeof(*ip.v6);
2972 l4_proto = ip.v6->nexthdr;
2973 if (l4.hdr != exthdr)
2974 ipv6_skip_exthdr(skb, exthdr - skb->data,
2975 &l4_proto, &frag_off);
2978 /* compute inner L3 header size */
2979 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2981 /* Enable L4 checksum offloads */
2984 /* enable checksum offloads */
2985 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2986 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2989 /* enable SCTP checksum offload */
2990 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2991 offset |= (sizeof(struct sctphdr) >> 2) <<
2992 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2995 /* enable UDP checksum offload */
2996 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2997 offset |= (sizeof(struct udphdr) >> 2) <<
2998 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3001 if (*tx_flags & I40E_TX_FLAGS_TSO)
3002 return -1;
3003 skb_checksum_help(skb);
3004 return 0;
3005 }
3007 *td_cmd |= cmd;
3008 *td_offset |= offset;
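/* Worked example (editorial): plain TCP/IPv4 without options programs
 * td_offset as MACLEN = 14 / 2 = 7 (2 byte units), IPLEN = 20 / 4 = 5
 * and L4LEN = tcp doff = 5 (both 4 byte units), which the hardware
 * uses to locate the checksum fields within the data descriptor.
 */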
3014 * i40e_create_tx_ctx - Build the Tx context descriptor
3015 * @tx_ring: ring to create the descriptor on
3016 * @cd_type_cmd_tso_mss: Quad Word 1
3017 * @cd_tunneling: Quad Word 0 - bits 0-31
3018 * @cd_l2tag2: Quad Word 0 - bits 32-63
3020 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3021 const u64 cd_type_cmd_tso_mss,
3022 const u32 cd_tunneling, const u32 cd_l2tag2)
3024 struct i40e_tx_context_desc *context_desc;
3025 int i = tx_ring->next_to_use;
3027 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3028 !cd_tunneling && !cd_l2tag2)
3031 /* grab the next descriptor */
3032 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3035 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3037 /* cpu_to_le32 and assign to struct fields */
3038 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3039 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3040 context_desc->rsvd = cpu_to_le16(0);
3041 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3045 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3046 * @tx_ring: the ring to be checked
3047 * @size: the size buffer we want to assure is available
3049 * Returns -EBUSY if a stop is needed, else 0
3051 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3053 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3054 /* Memory barrier before checking head and tail */
3055 smp_mb();
3057 /* Check again in a case another CPU has just made room available. */
3058 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3061 /* A reprieve! - use start_queue because it doesn't call schedule */
3062 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3063 ++tx_ring->tx_stats.restart_queue;
3068 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3071 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3072 * and so we need to figure out the cases where we need to linearize the skb.
3074 * For TSO we need to count the TSO header and segment payload separately.
3075 * As such we need to check cases where we have 7 fragments or more as we
3076 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3077 * the segment payload in the first descriptor, and another 7 for the
3078 * fragments.
3079 */
3080 bool __i40e_chk_linearize(struct sk_buff *skb)
3082 const struct skb_frag_struct *frag, *stale;
3085 /* no need to check if number of frags is less than 7 */
3086 nr_frags = skb_shinfo(skb)->nr_frags;
3087 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3090 /* We need to walk through the list and validate that each group
3091 * of 6 fragments totals at least gso_size.
3093 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3094 frag = &skb_shinfo(skb)->frags[0];
3096 /* Initialize sum to the negative value of gso_size minus 1. We
3097 * use this as the worst case scenario in which the frag ahead
3098 * of us only provides one byte which is why we are limited to 6
3099 * descriptors for a single transmit as the header and previous
3100 * fragment are already consuming 2 descriptors.
3102 sum = 1 - skb_shinfo(skb)->gso_size;
3104 /* Add size of frags 0 through 4 to create our initial sum */
3105 sum += skb_frag_size(frag++);
3106 sum += skb_frag_size(frag++);
3107 sum += skb_frag_size(frag++);
3108 sum += skb_frag_size(frag++);
3109 sum += skb_frag_size(frag++);
3111 /* Walk through fragments adding latest fragment, testing it, and
3112 * then removing stale fragments from the sum.
3114 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3115 int stale_size = skb_frag_size(stale);
3117 sum += skb_frag_size(frag++);
3119 /* The stale fragment may present us with a smaller
3120 * descriptor than the actual fragment size. To account
3121 * for that we need to remove all the data on the front and
3122 * figure out what the remainder would be in the last
3123 * descriptor associated with the fragment.
3125 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3126 int align_pad = -(stale->page_offset) &
3127 (I40E_MAX_READ_REQ_SIZE - 1);
3130 stale_size -= align_pad;
3133 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3134 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3135 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3138 /* if sum is negative we failed to make sufficient progress */
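/* Numeric example (editorial): with gso_size = 2000, sum starts at
 * 1 - 2000 = -1999. Six 400 byte frags cover 2400 bytes per window, so
 * sum never drops below zero and the skb is left alone; six 300 byte
 * frags cover only 1800 bytes, sum reaches -199 and the skb must be
 * linearized before transmit.
 */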
3152 * i40e_tx_map - Build the Tx descriptor
3153 * @tx_ring: ring to send buffer on
3155 * @first: first buffer info buffer to use
3156 * @tx_flags: collected send information
3157 * @hdr_len: size of the packet header
3158 * @td_cmd: the command field in the descriptor
3159 * @td_offset: offset for checksum or crc
3161 * Returns 0 on success, -1 on failure to DMA
3163 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3164 struct i40e_tx_buffer *first, u32 tx_flags,
3165 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3167 unsigned int data_len = skb->data_len;
3168 unsigned int size = skb_headlen(skb);
3169 struct skb_frag_struct *frag;
3170 struct i40e_tx_buffer *tx_bi;
3171 struct i40e_tx_desc *tx_desc;
3172 u16 i = tx_ring->next_to_use;
3177 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3178 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3179 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3180 I40E_TX_FLAGS_VLAN_SHIFT;
3183 first->tx_flags = tx_flags;
3185 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3187 tx_desc = I40E_TX_DESC(tx_ring, i);
3190 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3191 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3193 if (dma_mapping_error(tx_ring->dev, dma))
3196 /* record length, and DMA address */
3197 dma_unmap_len_set(tx_bi, len, size);
3198 dma_unmap_addr_set(tx_bi, dma, dma);
3200 /* align size to end of page */
3201 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3202 tx_desc->buffer_addr = cpu_to_le64(dma);
3204 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3205 tx_desc->cmd_type_offset_bsz =
3206 build_ctob(td_cmd, td_offset,
3213 if (i == tx_ring->count) {
3214 tx_desc = I40E_TX_DESC(tx_ring, 0);
3221 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3222 tx_desc->buffer_addr = cpu_to_le64(dma);
3225 if (likely(!data_len))
3228 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3235 if (i == tx_ring->count) {
3236 tx_desc = I40E_TX_DESC(tx_ring, 0);
3240 size = skb_frag_size(frag);
3243 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3246 tx_bi = &tx_ring->tx_bi[i];
3249 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3252 if (i == tx_ring->count)
3255 tx_ring->next_to_use = i;
3257 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3259 /* write last descriptor with EOP bit */
3260 td_cmd |= I40E_TX_DESC_CMD_EOP;
3262 /* We OR these values together to check both against 4 (WB_STRIDE)
3263 * below. This is safe since we don't re-use desc_count afterwards.
3265 desc_count |= ++tx_ring->packet_stride;
3267 if (desc_count >= WB_STRIDE) {
3268 /* write last descriptor with RS bit set */
3269 td_cmd |= I40E_TX_DESC_CMD_RS;
3270 tx_ring->packet_stride = 0;
3273 tx_desc->cmd_type_offset_bsz =
3274 build_ctob(td_cmd, td_offset, size, td_tag);
3276 /* Force memory writes to complete before letting h/w know there
3277 * are new descriptors to fetch.
3279 * We also use this memory barrier to make certain all of the
3280 * status bits have been updated before next_to_watch is written.
3284 /* set next_to_watch value indicating a packet is present */
3285 first->next_to_watch = tx_desc;
3287 /* notify HW of packet */
3288 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3289 writel(i, tx_ring->tail);
3291 /* we need this if more than one processor can write to our tail
3292 * at a time; it synchronizes IO on IA64/Altix systems
3293 */
3294 mmiowb();
3295 }
3297 return 0;
3299 dma_error:
3300 dev_info(tx_ring->dev, "TX DMA map failed\n");
3302 /* clear dma mappings for failed tx_bi map */
3304 tx_bi = &tx_ring->tx_bi[i];
3305 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3313 tx_ring->next_to_use = i;
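/* Worked example (editorial, assuming a 4K aligned mapping): a 32K
 * frag exceeds I40E_MAX_DATA_PER_TXD (16K - 1), so the inner while
 * loop above emits it as 12K + 12K + 8K slices (in
 * I40E_MAX_DATA_PER_TXD_ALIGNED chunks), three data descriptors
 * sharing a single DMA unmap record.
 */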
3319 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3320 * @xdp: data to transmit
3321 * @xdp_ring: XDP Tx ring
3323 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3324 struct i40e_ring *xdp_ring)
3326 u32 size = xdp->data_end - xdp->data;
3327 u16 i = xdp_ring->next_to_use;
3328 struct i40e_tx_buffer *tx_bi;
3329 struct i40e_tx_desc *tx_desc;
3332 if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3333 xdp_ring->tx_stats.tx_busy++;
3334 return I40E_XDP_CONSUMED;
3337 dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3338 if (dma_mapping_error(xdp_ring->dev, dma))
3339 return I40E_XDP_CONSUMED;
3341 tx_bi = &xdp_ring->tx_bi[i];
3342 tx_bi->bytecount = size;
3343 tx_bi->gso_segs = 1;
3344 tx_bi->raw_buf = xdp->data;
3346 /* record length, and DMA address */
3347 dma_unmap_len_set(tx_bi, len, size);
3348 dma_unmap_addr_set(tx_bi, dma, dma);
3350 tx_desc = I40E_TX_DESC(xdp_ring, i);
3351 tx_desc->buffer_addr = cpu_to_le64(dma);
3352 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3353 | I40E_TXD_CMD,
3354 0, size, 0);
3356 /* Make certain all of the status bits have been updated
3357 * before next_to_watch is written.
3362 if (i == xdp_ring->count)
3365 tx_bi->next_to_watch = tx_desc;
3366 xdp_ring->next_to_use = i;
3368 return I40E_XDP_TX;
3369 }
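/* Note (editorial): no tail bump happens here on purpose; the Rx clean
 * loop records xdp_xmit and writes xdp_ring->tail once per NAPI poll,
 * amortizing the MMIO write across every XDP_TX frame in the batch.
 */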
3372 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3374 * @tx_ring: ring to send buffer on
3376 * Returns NETDEV_TX_OK if sent, else an error code
3378 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3379 struct i40e_ring *tx_ring)
3381 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3382 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3383 struct i40e_tx_buffer *first;
3392 /* prefetch the data, we'll need it later */
3393 prefetch(skb->data);
3395 i40e_trace(xmit_frame_ring, skb, tx_ring);
3397 count = i40e_xmit_descriptor_count(skb);
3398 if (i40e_chk_linearize(skb, count)) {
3399 if (__skb_linearize(skb)) {
3400 dev_kfree_skb_any(skb);
3401 return NETDEV_TX_OK;
3403 count = i40e_txd_use_count(skb->len);
3404 tx_ring->tx_stats.tx_linearize++;
3407 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3408 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3409 * + 4 desc gap to avoid the cache line where head is,
3410 * + 1 desc for context descriptor,
3411 * otherwise try next time
3413 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3414 tx_ring->tx_stats.tx_busy++;
3415 return NETDEV_TX_BUSY;
3418 /* record the location of the first descriptor for this packet */
3419 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3421 first->bytecount = skb->len;
3422 first->gso_segs = 1;
3424 /* prepare the xmit flags */
3425 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3428 /* obtain protocol of skb */
3429 protocol = vlan_get_protocol(skb);
3431 /* setup IPv4/IPv6 offloads */
3432 if (protocol == htons(ETH_P_IP))
3433 tx_flags |= I40E_TX_FLAGS_IPV4;
3434 else if (protocol == htons(ETH_P_IPV6))
3435 tx_flags |= I40E_TX_FLAGS_IPV6;
3437 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3439 if (tso < 0)
3440 goto out_drop;
3441 else if (tso)
3442 tx_flags |= I40E_TX_FLAGS_TSO;
3444 /* Always offload the checksum, since it's in the data descriptor */
3445 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3446 tx_ring, &cd_tunneling);
3447 if (tso < 0)
3448 goto out_drop;
3450 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3452 if (tsyn)
3453 tx_flags |= I40E_TX_FLAGS_TSYN;
3455 skb_tx_timestamp(skb);
3457 /* always enable CRC insertion offload */
3458 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3460 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3461 cd_tunneling, cd_l2tag2);
3463 /* Add Flow Director ATR if it's enabled.
3465 * NOTE: this must always be directly before the data descriptor.
3467 i40e_atr(tx_ring, skb, tx_flags);
3469 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3470 td_cmd, td_offset))
3471 goto cleanup_tx_tstamp;
3473 return NETDEV_TX_OK;
3475 out_drop:
3476 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3477 dev_kfree_skb_any(first->skb);
3478 first->skb = NULL;
3479 cleanup_tx_tstamp:
3480 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3481 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3483 dev_kfree_skb_any(pf->ptp_tx_skb);
3484 pf->ptp_tx_skb = NULL;
3485 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3488 return NETDEV_TX_OK;
3492 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3494 * @netdev: network interface device structure
3496 * Returns NETDEV_TX_OK if sent, else an error code
3498 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3500 struct i40e_netdev_priv *np = netdev_priv(netdev);
3501 struct i40e_vsi *vsi = np->vsi;
3502 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3504 /* hardware can't handle really short frames, hardware padding works
3505 * beyond this point
3506 */
3507 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3508 return NETDEV_TX_OK;
3510 return i40e_xmit_frame_ring(skb, tx_ring);