1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/prefetch.h>
5 #include <net/busy_poll.h>
6 #include <linux/bpf_trace.h>
9 #include "i40e_trace.h"
10 #include "i40e_prototype.h"
11 #include "i40e_txrx_common.h"
14 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
16 * i40e_fdir - Generate a Flow Director descriptor based on fdata
17 * @tx_ring: Tx ring to send buffer on
18 * @fdata: Flow director filter data
19 * @add: Indicate if we are adding a rule or deleting one
22 static void i40e_fdir(struct i40e_ring *tx_ring,
23 struct i40e_fdir_filter *fdata, bool add)
25 struct i40e_filter_program_desc *fdir_desc;
26 struct i40e_pf *pf = tx_ring->vsi->back;
27 u32 flex_ptype, dtype_cmd;
30 /* grab the next descriptor */
31 i = tx_ring->next_to_use;
32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
37 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
38 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
40 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
41 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
43 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
44 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
46 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
47 (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
49 /* Use LAN VSI Id if not programmed by user */
50 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
51 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
52 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
54 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
57 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
58 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
59 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
60 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
62 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
63 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
65 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
66 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
68 if (fdata->cnt_index) {
69 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
70 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
71 ((u32)fdata->cnt_index <<
72 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
75 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
76 fdir_desc->rsvd = cpu_to_le32(0);
77 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
78 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
81 #define I40E_FD_CLEAN_DELAY 10
83 * i40e_program_fdir_filter - Program a Flow Director filter
84 * @fdir_data: Flow Director filter data to use as the filter parameters
85 * @raw_packet: the pre-allocated packet buffer for FDir
87 * @add: True for add/update, False for remove
89 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
90 u8 *raw_packet, struct i40e_pf *pf,
93 struct i40e_tx_buffer *tx_buf, *first;
94 struct i40e_tx_desc *tx_desc;
95 struct i40e_ring *tx_ring;
102 /* find existing FDIR VSI */
103 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
107 tx_ring = vsi->tx_rings[0];
110 /* we need two descriptors to add/del a filter and we can wait */
111 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
114 msleep_interruptible(1);
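	/* Each pass sleeps roughly 1 ms, so the counter starting at
	 * I40E_FD_CLEAN_DELAY bounds the wait for two free descriptors
	 * (one filter-program descriptor plus one dummy data descriptor)
	 * to about that many milliseconds.
	 */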
117 dma = dma_map_single(dev, raw_packet,
118 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
119 if (dma_mapping_error(dev, dma))
122 /* grab the next descriptor */
123 i = tx_ring->next_to_use;
124 first = &tx_ring->tx_bi[i];
125 i40e_fdir(tx_ring, fdir_data, add);
127 /* Now program a dummy descriptor */
128 i = tx_ring->next_to_use;
129 tx_desc = I40E_TX_DESC(tx_ring, i);
130 tx_buf = &tx_ring->tx_bi[i];
132 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
134 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
136 /* record length, and DMA address */
137 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
138 dma_unmap_addr_set(tx_buf, dma, dma);
140 tx_desc->buffer_addr = cpu_to_le64(dma);
141 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
143 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
144 tx_buf->raw_buf = (void *)raw_packet;
146 tx_desc->cmd_type_offset_bsz =
147 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
149 /* Force memory writes to complete before letting h/w
150 * know there are new descriptors to fetch.
154 /* Mark the data descriptor to be watched */
155 first->next_to_watch = tx_desc;
157 writel(tx_ring->next_to_use, tx_ring->tail);
164 #define IP_HEADER_OFFSET 14
165 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
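/* Dummy packet layout: 42 = 14 (Ethernet header, see IP_HEADER_OFFSET)
 * + 20 (IPv4 header) + 8 (UDP header); the rest of the buffer up to
 * I40E_FDIR_MAX_RAW_PACKET_SIZE stays zeroed by the kzalloc below.
 */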
167 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
168 * @vsi: pointer to the targeted VSI
169 * @fd_data: the flow director data required for the FDir descriptor
170 * @add: true adds a filter, false removes it
172 * Returns 0 if the filters were successfully added or removed
174 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
175 struct i40e_fdir_filter *fd_data,
178 struct i40e_pf *pf = vsi->back;
183 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
184 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
185 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
187 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
190 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
192 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
193 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
194 + sizeof(struct iphdr));
196 ip->daddr = fd_data->dst_ip;
197 udp->dest = fd_data->dst_port;
198 ip->saddr = fd_data->src_ip;
199 udp->source = fd_data->src_port;
201 if (fd_data->flex_filter) {
202 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
203 __be16 pattern = fd_data->flex_word;
204 u16 off = fd_data->flex_offset;
206 *((__force __be16 *)(payload + off)) = pattern;
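		/* For flexible filters the user-supplied 16-bit flex_word is
		 * written into the dummy packet's payload at flex_offset so
		 * the hardware can be programmed to match on it.
		 */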
209 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
210 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
212 dev_info(&pf->pdev->dev,
213 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
214 fd_data->pctype, fd_data->fd_id, ret);
215 /* Free the packet buffer since it wasn't added to the ring */
218 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
220 dev_info(&pf->pdev->dev,
221 "Filter OK for PCTYPE %d loc = %d\n",
222 fd_data->pctype, fd_data->fd_id);
224 dev_info(&pf->pdev->dev,
225 "Filter deleted for PCTYPE %d loc = %d\n",
226 fd_data->pctype, fd_data->fd_id);
230 pf->fd_udp4_filter_cnt++;
232 pf->fd_udp4_filter_cnt--;
237 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
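/* 54 = 14 (Ethernet) + 20 (IPv4) + 20 (TCP) header bytes in the dummy packet. */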
239 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
240 * @vsi: pointer to the targeted VSI
241 * @fd_data: the flow director data required for the FDir descriptor
242 * @add: true adds a filter, false removes it
244 * Returns 0 if the filters were successfully added or removed
246 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
247 struct i40e_fdir_filter *fd_data,
250 struct i40e_pf *pf = vsi->back;
256 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
257 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
258 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
259 0x0, 0x72, 0, 0, 0, 0};
261 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
264 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
266 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
267 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
268 + sizeof(struct iphdr));
270 ip->daddr = fd_data->dst_ip;
271 tcp->dest = fd_data->dst_port;
272 ip->saddr = fd_data->src_ip;
273 tcp->source = fd_data->src_port;
275 if (fd_data->flex_filter) {
276 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
277 __be16 pattern = fd_data->flex_word;
278 u16 off = fd_data->flex_offset;
280 *((__force __be16 *)(payload + off)) = pattern;
283 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
284 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
286 dev_info(&pf->pdev->dev,
287 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
288 fd_data->pctype, fd_data->fd_id, ret);
289 /* Free the packet buffer since it wasn't added to the ring */
292 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
294 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
295 fd_data->pctype, fd_data->fd_id);
297 dev_info(&pf->pdev->dev,
298 "Filter deleted for PCTYPE %d loc = %d\n",
299 fd_data->pctype, fd_data->fd_id);
303 pf->fd_tcp4_filter_cnt++;
304 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
305 I40E_DEBUG_FD & pf->hw.debug_mask)
306 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
307 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
309 pf->fd_tcp4_filter_cnt--;
315 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
317 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
318 * a specific flow spec
319 * @vsi: pointer to the targeted VSI
320 * @fd_data: the flow director data required for the FDir descriptor
321 * @add: true adds a filter, false removes it
323 * Returns 0 if the filters were successfully added or removed
325 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
326 struct i40e_fdir_filter *fd_data,
329 struct i40e_pf *pf = vsi->back;
330 struct sctphdr *sctp;
335 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
336 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
337 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
339 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
342 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
344 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
345 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
346 + sizeof(struct iphdr));
348 ip->daddr = fd_data->dst_ip;
349 sctp->dest = fd_data->dst_port;
350 ip->saddr = fd_data->src_ip;
351 sctp->source = fd_data->src_port;
353 if (fd_data->flex_filter) {
354 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
355 __be16 pattern = fd_data->flex_word;
356 u16 off = fd_data->flex_offset;
358 *((__force __be16 *)(payload + off)) = pattern;
361 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
362 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
364 dev_info(&pf->pdev->dev,
365 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
366 fd_data->pctype, fd_data->fd_id, ret);
367 /* Free the packet buffer since it wasn't added to the ring */
370 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
372 dev_info(&pf->pdev->dev,
373 "Filter OK for PCTYPE %d loc = %d\n",
374 fd_data->pctype, fd_data->fd_id);
376 dev_info(&pf->pdev->dev,
377 "Filter deleted for PCTYPE %d loc = %d\n",
378 fd_data->pctype, fd_data->fd_id);
382 pf->fd_sctp4_filter_cnt++;
384 pf->fd_sctp4_filter_cnt--;
389 #define I40E_IP_DUMMY_PACKET_LEN 34
391 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
392 * a specific flow spec
393 * @vsi: pointer to the targeted VSI
394 * @fd_data: the flow director data required for the FDir descriptor
395 * @add: true adds a filter, false removes it
397 * Returns 0 if the filters were successfully added or removed
399 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
400 struct i40e_fdir_filter *fd_data,
403 struct i40e_pf *pf = vsi->back;
408 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
409 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
412 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
413 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
414 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
417 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
418 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
420 ip->saddr = fd_data->src_ip;
421 ip->daddr = fd_data->dst_ip;
424 if (fd_data->flex_filter) {
425 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
426 __be16 pattern = fd_data->flex_word;
427 u16 off = fd_data->flex_offset;
429 *((__force __be16 *)(payload + off)) = pattern;
433 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
435 dev_info(&pf->pdev->dev,
436 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
437 fd_data->pctype, fd_data->fd_id, ret);
438 /* The packet buffer wasn't added to the ring so we
439 * need to free it now.
443 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
445 dev_info(&pf->pdev->dev,
446 "Filter OK for PCTYPE %d loc = %d\n",
447 fd_data->pctype, fd_data->fd_id);
449 dev_info(&pf->pdev->dev,
450 "Filter deleted for PCTYPE %d loc = %d\n",
451 fd_data->pctype, fd_data->fd_id);
456 pf->fd_ip4_filter_cnt++;
458 pf->fd_ip4_filter_cnt--;
464 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
465 * @vsi: pointer to the targeted VSI
466 * @input: filter to add or delete
467 * @add: true adds a filter, false removes it
470 int i40e_add_del_fdir(struct i40e_vsi *vsi,
471 struct i40e_fdir_filter *input, bool add)
473 struct i40e_pf *pf = vsi->back;
476 switch (input->flow_type & ~FLOW_EXT) {
478 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
481 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
484 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
487 switch (input->ip4_proto) {
489 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
492 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
495 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
498 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
501 /* We cannot support masking based on protocol */
502 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
508 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
513 /* The buffer allocated here will normally be freed by
514 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
515 * completion. In the event of an error adding the buffer to the FDIR
516 * ring, it will immediately be freed. It may also be freed by
517 * i40e_clean_tx_ring() when closing the VSI.
523 * i40e_fd_handle_status - check the Programming Status for FD
524 * @rx_ring: the Rx ring for this descriptor
525 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
526 * @prog_id: the id originally used for programming
528 * This is used to verify whether the FD programming or invalidation
529 * requested of the HW by SW was successful, and to take actions accordingly.
531 void i40e_fd_handle_status(struct i40e_ring *rx_ring,
532 union i40e_rx_desc *rx_desc, u8 prog_id)
534 struct i40e_pf *pf = rx_ring->vsi->back;
535 struct pci_dev *pdev = pf->pdev;
536 u32 fcnt_prog, fcnt_avail;
540 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
541 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
542 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
544 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
545 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
546 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
547 (I40E_DEBUG_FD & pf->hw.debug_mask))
548 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
551 /* Check if the programming error is for ATR.
552 * If so, auto disable ATR and set a state for
553 * flush in progress. Next time we come here if flush is in
554 * progress do nothing, once flush is complete the state will
557 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
561 /* store the current atr filter count */
562 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
564 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
565 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
566 /* These set_bit() calls aren't atomic with the
567 * test_bit() here, but worst case we potentially
568 * disable ATR and queue a flush right after SB
569 * support is re-enabled. That shouldn't cause an
572 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
573 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
576 /* filter programming failed most likely due to table full */
577 fcnt_prog = i40e_get_global_fd_count(pf);
578 fcnt_avail = pf->fdir_pf_filter_count;
579 /* If ATR is running fcnt_prog can quickly change,
580 * if we are very close to full, it makes sense to disable
581 * FD ATR/SB and then re-enable it when there is room.
583 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
584 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
585 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
587 if (I40E_DEBUG_FD & pf->hw.debug_mask)
588 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
590 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
591 if (I40E_DEBUG_FD & pf->hw.debug_mask)
592 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
593 rx_desc->wb.qword0.hi_dword.fd_id);
598 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
599 * @ring: the ring that owns the buffer
600 * @tx_buffer: the buffer to free
602 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
603 struct i40e_tx_buffer *tx_buffer)
605 if (tx_buffer->skb) {
606 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
607 kfree(tx_buffer->raw_buf);
608 else if (ring_is_xdp(ring))
609 xdp_return_frame(tx_buffer->xdpf);
611 dev_kfree_skb_any(tx_buffer->skb);
612 if (dma_unmap_len(tx_buffer, len))
613 dma_unmap_single(ring->dev,
614 dma_unmap_addr(tx_buffer, dma),
615 dma_unmap_len(tx_buffer, len),
617 } else if (dma_unmap_len(tx_buffer, len)) {
618 dma_unmap_page(ring->dev,
619 dma_unmap_addr(tx_buffer, dma),
620 dma_unmap_len(tx_buffer, len),
624 tx_buffer->next_to_watch = NULL;
625 tx_buffer->skb = NULL;
626 dma_unmap_len_set(tx_buffer, len, 0);
627 /* tx_buffer must be completely set up in the transmit path */
631 * i40e_clean_tx_ring - Free all Tx buffers in a ring
632 * @tx_ring: ring to be cleaned
634 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
636 unsigned long bi_size;
639 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
640 i40e_xsk_clean_tx_ring(tx_ring);
642 /* ring already cleared, nothing to do */
646 /* Free all the Tx ring sk_buffs */
647 for (i = 0; i < tx_ring->count; i++)
648 i40e_unmap_and_free_tx_resource(tx_ring,
652 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
653 memset(tx_ring->tx_bi, 0, bi_size);
655 /* Zero out the descriptor ring */
656 memset(tx_ring->desc, 0, tx_ring->size);
658 tx_ring->next_to_use = 0;
659 tx_ring->next_to_clean = 0;
661 if (!tx_ring->netdev)
664 /* cleanup Tx queue statistics */
665 netdev_tx_reset_queue(txring_txq(tx_ring));
669 * i40e_free_tx_resources - Free Tx resources per queue
670 * @tx_ring: Tx descriptor ring for a specific queue
672 * Free all transmit software resources
674 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
676 i40e_clean_tx_ring(tx_ring);
677 kfree(tx_ring->tx_bi);
678 tx_ring->tx_bi = NULL;
681 dma_free_coherent(tx_ring->dev, tx_ring->size,
682 tx_ring->desc, tx_ring->dma);
683 tx_ring->desc = NULL;
688 * i40e_get_tx_pending - how many tx descriptors not processed
689 * @ring: the ring of descriptors
690 * @in_sw: use SW variables
692 * Since there is no access to the ring head register
693 * in XL710, we need to use our local copies
695 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
700 head = i40e_get_head(ring);
701 tail = readl(ring->tail);
703 head = ring->next_to_clean;
704 tail = ring->next_to_use;
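	/* The pending count must account for ring wrap, e.g. with a
	 * 512-entry ring, head = 500 and tail = 10 means
	 * 10 + 512 - 500 = 22 descriptors are still outstanding.
	 */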
708 return (head < tail) ?
709 tail - head : (tail + ring->count - head);
715 * i40e_detect_recover_hung - Function to detect and recover hung queues
716 * @vsi: pointer to vsi struct with tx queues
718 * VSI has netdev and netdev has TX queues. This function checks each of
719 * those TX queues; if one appears hung, it triggers recovery by issuing a SW interrupt.
721 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
723 struct i40e_ring *tx_ring = NULL;
724 struct net_device *netdev;
731 if (test_bit(__I40E_VSI_DOWN, vsi->state))
734 netdev = vsi->netdev;
738 if (!netif_carrier_ok(netdev))
741 for (i = 0; i < vsi->num_queue_pairs; i++) {
742 tx_ring = vsi->tx_rings[i];
743 if (tx_ring && tx_ring->desc) {
744 /* If packet counter has not changed the queue is
745 * likely stalled, so force an interrupt for this
748 * prev_pkt_ctr would be negative if there was no
751 packets = tx_ring->stats.packets & INT_MAX;
752 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
753 i40e_force_wb(vsi, tx_ring->q_vector);
757 /* Memory barrier between read of packet count and call
758 * to i40e_get_tx_pending()
761 tx_ring->tx_stats.prev_pkt_ctr =
762 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
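			/* A queue with nothing pending is parked at -1 so an
			 * idle ring is never mistaken for a hung one on the
			 * next pass.
			 */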
768 * i40e_clean_tx_irq - Reclaim resources after transmit completes
769 * @vsi: the VSI we care about
770 * @tx_ring: Tx ring to clean
771 * @napi_budget: Used to determine if we are in netpoll
773 * Returns true if there's any budget left (i.e. the clean is finished)
775 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
776 struct i40e_ring *tx_ring, int napi_budget)
778 u16 i = tx_ring->next_to_clean;
779 struct i40e_tx_buffer *tx_buf;
780 struct i40e_tx_desc *tx_head;
781 struct i40e_tx_desc *tx_desc;
782 unsigned int total_bytes = 0, total_packets = 0;
783 unsigned int budget = vsi->work_limit;
785 tx_buf = &tx_ring->tx_bi[i];
786 tx_desc = I40E_TX_DESC(tx_ring, i);
789 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
792 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
794 /* if next_to_watch is not set then there is no work pending */
798 /* prevent any other reads prior to eop_desc */
801 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
802 /* we have caught up to head, no work left to do */
803 if (tx_head == tx_desc)
806 /* clear next_to_watch to prevent false hangs */
807 tx_buf->next_to_watch = NULL;
809 /* update the statistics for this packet */
810 total_bytes += tx_buf->bytecount;
811 total_packets += tx_buf->gso_segs;
813 /* free the skb/XDP data */
814 if (ring_is_xdp(tx_ring))
815 xdp_return_frame(tx_buf->xdpf);
817 napi_consume_skb(tx_buf->skb, napi_budget);
819 /* unmap skb header data */
820 dma_unmap_single(tx_ring->dev,
821 dma_unmap_addr(tx_buf, dma),
822 dma_unmap_len(tx_buf, len),
825 /* clear tx_buffer data */
827 dma_unmap_len_set(tx_buf, len, 0);
829 /* unmap remaining buffers */
830 while (tx_desc != eop_desc) {
831 i40e_trace(clean_tx_irq_unmap,
832 tx_ring, tx_desc, tx_buf);
839 tx_buf = tx_ring->tx_bi;
840 tx_desc = I40E_TX_DESC(tx_ring, 0);
843 /* unmap any remaining paged data */
844 if (dma_unmap_len(tx_buf, len)) {
845 dma_unmap_page(tx_ring->dev,
846 dma_unmap_addr(tx_buf, dma),
847 dma_unmap_len(tx_buf, len),
849 dma_unmap_len_set(tx_buf, len, 0);
853 /* move us one more past the eop_desc for start of next pkt */
859 tx_buf = tx_ring->tx_bi;
860 tx_desc = I40E_TX_DESC(tx_ring, 0);
865 /* update budget accounting */
867 } while (likely(budget));
870 tx_ring->next_to_clean = i;
871 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
872 i40e_arm_wb(tx_ring, vsi, budget);
874 if (ring_is_xdp(tx_ring))
877 /* notify netdev of completed buffers */
878 netdev_tx_completed_queue(txring_txq(tx_ring),
879 total_packets, total_bytes);
881 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
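	/* Only wake the queue once at least 2 * DESC_NEEDED descriptors are
	 * free, so a freshly woken queue is not stopped again immediately.
	 */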
882 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
883 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
884 /* Make sure that anybody stopping the queue after this
885 * sees the new next_to_clean.
888 if (__netif_subqueue_stopped(tx_ring->netdev,
889 tx_ring->queue_index) &&
890 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
891 netif_wake_subqueue(tx_ring->netdev,
892 tx_ring->queue_index);
893 ++tx_ring->tx_stats.restart_queue;
901 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
902 * @vsi: the VSI we care about
903 * @q_vector: the vector on which to enable writeback
906 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
907 struct i40e_q_vector *q_vector)
909 u16 flags = q_vector->tx.ring[0].flags;
912 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
915 if (q_vector->arm_wb_state)
918 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
919 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
920 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
923 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
926 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
927 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
929 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
931 q_vector->arm_wb_state = true;
935 * i40e_force_wb - Issue SW Interrupt so HW does a wb
936 * @vsi: the VSI we care about
937 * @q_vector: the vector on which to force writeback
940 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
942 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
943 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
944 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
945 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
946 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
947 /* allow 00 to be written to the index */
950 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
952 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
953 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
954 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
955 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
956 /* allow 00 to be written to the index */
958 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
962 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
963 struct i40e_ring_container *rc)
965 return &q_vector->rx == rc;
968 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
970 unsigned int divisor;
972 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
973 case I40E_LINK_SPEED_40GB:
974 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
976 case I40E_LINK_SPEED_25GB:
977 case I40E_LINK_SPEED_20GB:
978 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
981 case I40E_LINK_SPEED_10GB:
982 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
984 case I40E_LINK_SPEED_1GB:
985 case I40E_LINK_SPEED_100MB:
986 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
994 * i40e_update_itr - update the dynamic ITR value based on statistics
995 * @q_vector: structure containing interrupt and ring information
996 * @rc: structure containing ring performance data
998 * Stores a new ITR value based on packets and byte
999 * counts during the last interrupt. The advantage of per interrupt
1000 * computation is faster updates and more accurate ITR for the current
1001 * traffic pattern. Constants in this function were computed
1002 * based on theoretical maximum wire speed and thresholds were set based
1003 * on testing data as well as attempting to minimize response time
1004 * while increasing bulk throughput.
1006 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1007 struct i40e_ring_container *rc)
1009 unsigned int avg_wire_size, packets, bytes, itr;
1010 unsigned long next_update = jiffies;
1012 /* If we don't have any rings just leave ourselves set for maximum
1013 * possible latency so we take ourselves out of the equation.
1015 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1018 /* For Rx we want to push the delay up and default to low latency.
1019 * for Tx we want to pull the delay down and default to high latency.
1021 itr = i40e_container_is_rx(q_vector, rc) ?
1022 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1023 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1025 /* If we didn't update within up to 1 - 2 jiffies we can assume
1026 * that either packets are coming in so slow there hasn't been
1027 * any work, or that there is so much work that NAPI is dealing
1028 * with interrupt moderation and we don't need to do anything.
1030 if (time_after(next_update, rc->next_update))
1033 /* If itr_countdown is set it means we programmed an ITR within
1034 * the last 4 interrupt cycles. This has a side effect of us
1035 * potentially firing an early interrupt. In order to work around
1036 * this we need to throw out any data received for a few
1037 * interrupts following the update.
1039 if (q_vector->itr_countdown) {
1040 itr = rc->target_itr;
1044 packets = rc->total_packets;
1045 bytes = rc->total_bytes;
1047 if (i40e_container_is_rx(q_vector, rc)) {
1048 /* If Rx there are 1 to 4 packets and bytes are less than
1049 * 9000 assume insufficient data to use bulk rate limiting
1050 * approach unless Tx is already in bulk rate limiting. We
1051 * are likely latency driven.
1053 if (packets && packets < 4 && bytes < 9000 &&
1054 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1055 itr = I40E_ITR_ADAPTIVE_LATENCY;
1056 goto adjust_by_size;
1058 } else if (packets < 4) {
1059 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1060 * bulk mode and we are receiving 4 or fewer packets just
1061 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1062 * that the Rx can relax.
1064 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1065 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1066 I40E_ITR_ADAPTIVE_MAX_USECS)
1068 } else if (packets > 32) {
1069 /* If we have processed over 32 packets in a single interrupt
1070 * for Tx assume we need to switch over to "bulk" mode.
1072 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1075 /* We have no packets to actually measure against. This means
1076 * either one of the other queues on this vector is active or
1077 * we are a Tx queue doing TSO with too high of an interrupt rate.
1079 * Between 4 and 56 we can assume that our current interrupt delay
1080 * is only slightly too low. As such we should increase it by a small
1084 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1085 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1086 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1087 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1092 if (packets <= 256) {
1093 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1094 itr &= I40E_ITR_MASK;
1096 /* Between 56 and 112 is our "goldilocks" zone where we are
1097 * working out "just right". Just report that our current
1098 * ITR is good for us.
1103 /* If packet count is 128 or greater we are likely looking
1104 * at a slight overrun of the delay we want. Try halving
1105 * our delay to see if that will cut the number of packets
1106 * in half per interrupt.
1109 itr &= I40E_ITR_MASK;
1110 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1111 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1116 /* The paths below assume we are dealing with a bulk ITR since
1117 * number of packets is greater than 256. We are just going to have
1118 * to compute a value and try to bring the count under control,
1119 * though for smaller packet sizes there isn't much we can do as
1120 * NAPI polling will likely be kicking in sooner rather than later.
1122 itr = I40E_ITR_ADAPTIVE_BULK;
1125 /* If packet counts are 256 or greater we can assume we have a gross
1126 * overestimation of what the rate should be. Instead of trying to fine
1127 * tune it just use the formula below to try and dial in an exact value
1128 * given the current packet size of the frame.
1130 avg_wire_size = bytes / packets;
1132 /* The following is a crude approximation of:
1133 * wmem_default / (size + overhead) = desired_pkts_per_int
1134 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1135 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1137 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1138 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1141 * (170 * (size + 24)) / (size + 640) = ITR
1143 * We first do some math on the packet size and then finally bitshift
1144 * by 8 after rounding up. We also have to account for PCIe link speed
1145 * difference as ITR scales based on this.
1147 if (avg_wire_size <= 60) {
1148 /* Start at 250k ints/sec */
1149 avg_wire_size = 4096;
1150 } else if (avg_wire_size <= 380) {
1151 /* 250K ints/sec to 60K ints/sec */
1152 avg_wire_size *= 40;
1153 avg_wire_size += 1696;
1154 } else if (avg_wire_size <= 1084) {
1155 /* 60K ints/sec to 36K ints/sec */
1156 avg_wire_size *= 15;
1157 avg_wire_size += 11452;
1158 } else if (avg_wire_size <= 1980) {
1159 /* 36K ints/sec to 30K ints/sec */
1161 avg_wire_size += 22420;
1163 /* plateau at a limit of 30K ints/sec */
1164 avg_wire_size = 32256;
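	/* For illustration: a 512-byte average frame lands in the 381..1084
	 * bucket above, giving 512 * 15 + 11452 = 19132 before the final
	 * scaling by the link-speed-dependent divisor below.
	 */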
1167 /* If we are in low latency mode halve our delay which doubles the
1168 * rate to somewhere between 100K and 16K ints/sec
1170 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1173 /* Resultant value is 256 times larger than it needs to be. This
1174 * gives us room to adjust the value as needed to either increase
1175 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1177 * Use addition as we have already recorded the new latency flag
1178 * for the ITR value.
1180 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1181 I40E_ITR_ADAPTIVE_MIN_INC;
1183 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1184 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1185 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1189 /* write back value */
1190 rc->target_itr = itr;
1192 /* next update should occur within next jiffy */
1193 rc->next_update = next_update + 1;
1195 rc->total_bytes = 0;
1196 rc->total_packets = 0;
1200 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1201 * @rx_ring: rx descriptor ring to store buffers on
1202 * @old_buff: donor buffer to have page reused
1204 * Synchronizes page for reuse by the adapter
1206 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1207 struct i40e_rx_buffer *old_buff)
1209 struct i40e_rx_buffer *new_buff;
1210 u16 nta = rx_ring->next_to_alloc;
1212 new_buff = &rx_ring->rx_bi[nta];
1214 /* update, and store next to alloc */
1216 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1218 /* transfer page from old buffer to new buffer */
1219 new_buff->dma = old_buff->dma;
1220 new_buff->page = old_buff->page;
1221 new_buff->page_offset = old_buff->page_offset;
1222 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1224 rx_ring->rx_stats.page_reuse_count++;
1226 /* clear contents of buffer_info */
1227 old_buff->page = NULL;
1231 * i40e_rx_is_programming_status - check for programming status descriptor
1232 * @qw: qword representing status_error_len in CPU ordering
1234 * The value in the descriptor length field indicates whether this
1235 * is a programming status descriptor for flow director or FCoE,
1236 * signalled by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
1237 * it is a packet descriptor.
1239 static inline bool i40e_rx_is_programming_status(u64 qw)
1241 /* The Rx filter programming status and SPH bit occupy the same
1242 * spot in the descriptor. Since we don't support packet split we
1243 * can just reuse the bit as an indication that this is a
1244 * programming status descriptor.
1246 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1250 * i40e_clean_programming_status - try clean the programming status descriptor
1251 * @rx_ring: the rx ring that has this descriptor
1252 * @rx_desc: the rx descriptor written back by HW
1253 * @qw: qword representing status_error_len in CPU ordering
1255 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1256 * status being successful or not and take actions accordingly. FCoE should
1257 * handle its context/filter programming/invalidation status and take actions.
1259 * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
1261 struct i40e_rx_buffer *i40e_clean_programming_status(
1262 struct i40e_ring *rx_ring,
1263 union i40e_rx_desc *rx_desc,
1266 struct i40e_rx_buffer *rx_buffer;
1270 if (!i40e_rx_is_programming_status(qw))
1273 ntc = rx_ring->next_to_clean;
1275 /* fetch, update, and store next to clean */
1276 rx_buffer = &rx_ring->rx_bi[ntc++];
1277 ntc = (ntc < rx_ring->count) ? ntc : 0;
1278 rx_ring->next_to_clean = ntc;
1280 prefetch(I40E_RX_DESC(rx_ring, ntc));
1282 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1283 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1285 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1286 i40e_fd_handle_status(rx_ring, rx_desc, id);
1292 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1293 * @tx_ring: the tx ring to set up
1295 * Return 0 on success, negative on error
1297 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1299 struct device *dev = tx_ring->dev;
1305 /* warn if we are about to overwrite the pointer */
1306 WARN_ON(tx_ring->tx_bi);
1307 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1308 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1309 if (!tx_ring->tx_bi)
1312 u64_stats_init(&tx_ring->syncp);
1314 /* round up to nearest 4K */
1315 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1316 /* add u32 for head writeback, align after this takes care of
1317 * guaranteeing this is at least one cache line in size
1319 tx_ring->size += sizeof(u32);
1320 tx_ring->size = ALIGN(tx_ring->size, 4096);
1321 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1322 &tx_ring->dma, GFP_KERNEL);
1323 if (!tx_ring->desc) {
1324 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1329 tx_ring->next_to_use = 0;
1330 tx_ring->next_to_clean = 0;
1331 tx_ring->tx_stats.prev_pkt_ctr = -1;
1335 kfree(tx_ring->tx_bi);
1336 tx_ring->tx_bi = NULL;
1341 * i40e_clean_rx_ring - Free Rx buffers
1342 * @rx_ring: ring to be cleaned
1344 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1346 unsigned long bi_size;
1349 /* ring already cleared, nothing to do */
1350 if (!rx_ring->rx_bi)
1354 dev_kfree_skb(rx_ring->skb);
1355 rx_ring->skb = NULL;
1358 if (rx_ring->xsk_umem) {
1359 i40e_xsk_clean_rx_ring(rx_ring);
1363 /* Free all the Rx ring sk_buffs */
1364 for (i = 0; i < rx_ring->count; i++) {
1365 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1370 /* Invalidate cache lines that may have been written to by
1371 * device so that we avoid corrupting memory.
1373 dma_sync_single_range_for_cpu(rx_ring->dev,
1376 rx_ring->rx_buf_len,
1379 /* free resources associated with mapping */
1380 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1381 i40e_rx_pg_size(rx_ring),
1385 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1388 rx_bi->page_offset = 0;
1392 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1393 memset(rx_ring->rx_bi, 0, bi_size);
1395 /* Zero out the descriptor ring */
1396 memset(rx_ring->desc, 0, rx_ring->size);
1398 rx_ring->next_to_alloc = 0;
1399 rx_ring->next_to_clean = 0;
1400 rx_ring->next_to_use = 0;
1404 * i40e_free_rx_resources - Free Rx resources
1405 * @rx_ring: ring to clean the resources from
1407 * Free all receive software resources
1409 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1411 i40e_clean_rx_ring(rx_ring);
1412 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1413 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1414 rx_ring->xdp_prog = NULL;
1415 kfree(rx_ring->rx_bi);
1416 rx_ring->rx_bi = NULL;
1418 if (rx_ring->desc) {
1419 dma_free_coherent(rx_ring->dev, rx_ring->size,
1420 rx_ring->desc, rx_ring->dma);
1421 rx_ring->desc = NULL;
1426 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1427 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1429 * Returns 0 on success, negative on failure
1431 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1433 struct device *dev = rx_ring->dev;
1437 /* warn if we are about to overwrite the pointer */
1438 WARN_ON(rx_ring->rx_bi);
1439 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1440 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1441 if (!rx_ring->rx_bi)
1444 u64_stats_init(&rx_ring->syncp);
1446 /* Round up to nearest 4K */
1447 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1448 rx_ring->size = ALIGN(rx_ring->size, 4096);
1449 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1450 &rx_ring->dma, GFP_KERNEL);
1452 if (!rx_ring->desc) {
1453 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1458 rx_ring->next_to_alloc = 0;
1459 rx_ring->next_to_clean = 0;
1460 rx_ring->next_to_use = 0;
1462 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1463 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1464 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1465 rx_ring->queue_index);
1470 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1474 kfree(rx_ring->rx_bi);
1475 rx_ring->rx_bi = NULL;
1480 * i40e_release_rx_desc - Store the new tail and head values
1481 * @rx_ring: ring to bump
1482 * @val: new head index
1484 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1486 rx_ring->next_to_use = val;
1488 /* update next to alloc since we have filled the ring */
1489 rx_ring->next_to_alloc = val;
1491 /* Force memory writes to complete before letting h/w
1492 * know there are new descriptors to fetch. (Only
1493 * applicable for weak-ordered memory model archs,
1497 writel(val, rx_ring->tail);
1501 * i40e_rx_offset - Return expected offset into page to access data
1502 * @rx_ring: Ring we are requesting offset of
1504 * Returns the offset value for ring into the data buffer.
1506 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1508 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1512 * i40e_alloc_mapped_page - recycle or make a new page
1513 * @rx_ring: ring to use
1514 * @bi: rx_buffer struct to modify
1516 * Returns true if the page was successfully allocated or
1519 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1520 struct i40e_rx_buffer *bi)
1522 struct page *page = bi->page;
1525 /* since we are recycling buffers we should seldom need to alloc */
1527 rx_ring->rx_stats.page_reuse_count++;
1531 /* alloc new page for storage */
1532 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1533 if (unlikely(!page)) {
1534 rx_ring->rx_stats.alloc_page_failed++;
1538 /* map page for use */
1539 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1540 i40e_rx_pg_size(rx_ring),
1544 /* if mapping failed free memory back to system since
1545 * there isn't much point in holding memory we can't use
1547 if (dma_mapping_error(rx_ring->dev, dma)) {
1548 __free_pages(page, i40e_rx_pg_order(rx_ring));
1549 rx_ring->rx_stats.alloc_page_failed++;
1555 bi->page_offset = i40e_rx_offset(rx_ring);
1556 page_ref_add(page, USHRT_MAX - 1);
1557 bi->pagecnt_bias = USHRT_MAX;
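	/* Take USHRT_MAX page references up front and track the driver's
	 * share in pagecnt_bias; recycling then only adjusts the bias
	 * instead of touching the atomic page refcount for every buffer.
	 */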
1563 * i40e_receive_skb - Send a completed packet up the stack
1564 * @rx_ring: rx ring in play
1565 * @skb: packet to send up
1566 * @vlan_tag: vlan tag for packet
1568 void i40e_receive_skb(struct i40e_ring *rx_ring,
1569 struct sk_buff *skb, u16 vlan_tag)
1571 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1573 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1574 (vlan_tag & VLAN_VID_MASK))
1575 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1577 napi_gro_receive(&q_vector->napi, skb);
1581 * i40e_alloc_rx_buffers - Replace used receive buffers
1582 * @rx_ring: ring to place buffers on
1583 * @cleaned_count: number of buffers to replace
1585 * Returns false if all allocations were successful, true if any fail
1587 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1589 u16 ntu = rx_ring->next_to_use;
1590 union i40e_rx_desc *rx_desc;
1591 struct i40e_rx_buffer *bi;
1593 /* do nothing if no valid netdev defined */
1594 if (!rx_ring->netdev || !cleaned_count)
1597 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1598 bi = &rx_ring->rx_bi[ntu];
1601 if (!i40e_alloc_mapped_page(rx_ring, bi))
1604 /* sync the buffer for use by the device */
1605 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1607 rx_ring->rx_buf_len,
1610 /* Refresh the desc even if buffer_addrs didn't change
1611 * because each write-back erases this info.
1613 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1618 if (unlikely(ntu == rx_ring->count)) {
1619 rx_desc = I40E_RX_DESC(rx_ring, 0);
1620 bi = rx_ring->rx_bi;
1624 /* clear the status bits for the next_to_use descriptor */
1625 rx_desc->wb.qword1.status_error_len = 0;
1628 } while (cleaned_count);
1630 if (rx_ring->next_to_use != ntu)
1631 i40e_release_rx_desc(rx_ring, ntu);
1636 if (rx_ring->next_to_use != ntu)
1637 i40e_release_rx_desc(rx_ring, ntu);
1639 /* make sure to come back via polling to try again after
1640 * allocation failure
1646 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1647 * @vsi: the VSI we care about
1648 * @skb: skb currently being received and modified
1649 * @rx_desc: the receive descriptor
1651 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1652 struct sk_buff *skb,
1653 union i40e_rx_desc *rx_desc)
1655 struct i40e_rx_ptype_decoded decoded;
1656 u32 rx_error, rx_status;
1661 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1662 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1663 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1664 I40E_RXD_QW1_ERROR_SHIFT;
1665 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1666 I40E_RXD_QW1_STATUS_SHIFT;
1667 decoded = decode_rx_desc_ptype(ptype);
1669 skb->ip_summed = CHECKSUM_NONE;
1671 skb_checksum_none_assert(skb);
1673 /* Rx csum enabled and ip headers found? */
1674 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1677 /* did the hardware decode the packet and checksum? */
1678 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1681 /* both known and outer_ip must be set for the below code to work */
1682 if (!(decoded.known && decoded.outer_ip))
1685 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1686 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1687 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1688 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1691 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1692 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1695 /* likely incorrect csum if alternate IP extension headers found */
1697 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1698 /* don't increment checksum err here, non-fatal err */
1701 /* there was some L4 error, count error and punt packet to the stack */
1702 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1705 /* handle packets that were not able to be checksummed due
1706 * to arrival speed, in this case the stack can compute
1709 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1712 /* If there is an outer header present that might contain a checksum
1713 * we need to bump the checksum level by 1 to reflect the fact that
1714 * we are indicating we validated the inner checksum.
1716 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1717 skb->csum_level = 1;
1719 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1720 switch (decoded.inner_prot) {
1721 case I40E_RX_PTYPE_INNER_PROT_TCP:
1722 case I40E_RX_PTYPE_INNER_PROT_UDP:
1723 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1733 vsi->back->hw_csum_rx_error++;
1737 * i40e_ptype_to_htype - get a hash type
1738 * @ptype: the ptype value from the descriptor
1740 * Returns a hash type to be used by skb_set_hash
1742 static inline int i40e_ptype_to_htype(u8 ptype)
1744 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1747 return PKT_HASH_TYPE_NONE;
1749 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1750 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1751 return PKT_HASH_TYPE_L4;
1752 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1753 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1754 return PKT_HASH_TYPE_L3;
1756 return PKT_HASH_TYPE_L2;
1760 * i40e_rx_hash - set the hash value in the skb
1761 * @ring: descriptor ring
1762 * @rx_desc: specific descriptor
1763 * @skb: skb currently being received and modified
1764 * @rx_ptype: Rx packet type
1766 static inline void i40e_rx_hash(struct i40e_ring *ring,
1767 union i40e_rx_desc *rx_desc,
1768 struct sk_buff *skb,
1772 const __le64 rss_mask =
1773 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1774 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1776 if (!(ring->netdev->features & NETIF_F_RXHASH))
1779 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1780 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1781 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1786 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1787 * @rx_ring: rx descriptor ring packet is being transacted on
1788 * @rx_desc: pointer to the EOP Rx descriptor
1789 * @skb: pointer to current skb being populated
1790 * @rx_ptype: the packet type decoded by hardware
1792 * This function checks the ring, descriptor, and packet information in
1793 * order to populate the hash, checksum, VLAN, protocol, and
1794 * other fields within the skb.
1796 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1797 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1800 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1801 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1802 I40E_RXD_QW1_STATUS_SHIFT;
1803 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1804 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1805 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1807 if (unlikely(tsynvalid))
1808 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1810 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1812 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1814 skb_record_rx_queue(skb, rx_ring->queue_index);
1816 /* modifies the skb - consumes the enet header */
1817 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1821 * i40e_cleanup_headers - Correct empty headers
1822 * @rx_ring: rx descriptor ring packet is being transacted on
1823 * @skb: pointer to current skb being fixed
1824 * @rx_desc: pointer to the EOP Rx descriptor
1826 * Also address the case where we are pulling data in on pages only
1827 * and as such no data is present in the skb header.
1829 * In addition if skb is not at least 60 bytes we need to pad it so that
1830 * it is large enough to qualify as a valid Ethernet frame.
1832 * Returns true if an error was encountered and skb was freed.
1834 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1835 union i40e_rx_desc *rx_desc)
1838 /* XDP packets use error pointer so abort at this point */
1842 /* ERR_MASK will only have valid bits if EOP set, and
1843 * what we are doing here is actually checking
1844 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1847 if (unlikely(i40e_test_staterr(rx_desc,
1848 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1849 dev_kfree_skb_any(skb);
1853 /* if eth_skb_pad returns an error the skb was freed */
1854 if (eth_skb_pad(skb))
1861 * i40e_page_is_reusable - check if any reuse is possible
1862 * @page: page struct to check
1864 * A page is not reusable if it was allocated under low memory
1865 * conditions, or it's not in the same NUMA node as this CPU.
1867 static inline bool i40e_page_is_reusable(struct page *page)
1869 return (page_to_nid(page) == numa_mem_id()) &&
1870 !page_is_pfmemalloc(page);
1874 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1875 * the adapter for another receive
1877 * @rx_buffer: buffer containing the page
1879 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1880 * an unused region in the page.
1882 * For small pages, @truesize will be a constant value, half the size
1883 * of the memory at page. We'll attempt to alternate between high and
1884 * low halves of the page, with one half ready for use by the hardware
1885 * and the other half being consumed by the stack. We use the page
1886 * ref count to determine whether the stack has finished consuming the
1887 * portion of this page that was passed up with a previous packet. If
1888 * the page ref count is >1, we'll assume the "other" half page is
1889 * still busy, and this page cannot be reused.
1891 * For larger pages, @truesize will be the actual space used by the
1892 * received packet (adjusted upward to an even multiple of the cache
1893 * line size). This will advance through the page by the amount
1894 * actually consumed by the received packets while there is still
1895 * space for a buffer. Each region of larger pages will be used at
1896 * most once, after which the page will not be reused.
1898 * In either case, if the page is reusable its refcount is increased.
1900 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1902 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1903 struct page *page = rx_buffer->page;
1905 /* Is any reuse possible? */
1906 if (unlikely(!i40e_page_is_reusable(page)))
1909 #if (PAGE_SIZE < 8192)
1910 /* if we are only owner of page we can reuse it */
1911 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1914 #define I40E_LAST_OFFSET \
1915 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1916 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1920 /* If we have drained the page fragment pool we need to update
1921 * the pagecnt_bias and page count so that we fully restock the
1922 * number of references the driver holds.
1924 if (unlikely(pagecnt_bias == 1)) {
1925 page_ref_add(page, USHRT_MAX - 1);
1926 rx_buffer->pagecnt_bias = USHRT_MAX;
1933 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1934 * @rx_ring: rx descriptor ring to transact packets on
1935 * @rx_buffer: buffer containing page to add
1936 * @skb: sk_buff to place the data into
1937 * @size: packet length from rx_desc
1939 * This function will add the data contained in rx_buffer->page to the skb.
1940 * It will just attach the page as a frag to the skb.
1942 * The function will then update the page offset.
1944 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1945 struct i40e_rx_buffer *rx_buffer,
1946 struct sk_buff *skb,
1949 #if (PAGE_SIZE < 8192)
1950 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1952 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1955 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1956 rx_buffer->page_offset, size, truesize);
1958 /* page is being used so we must update the page offset */
1959 #if (PAGE_SIZE < 8192)
1960 rx_buffer->page_offset ^= truesize;
1962 rx_buffer->page_offset += truesize;
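	/* Order-0 pages (PAGE_SIZE < 8192) simply flip between the two
	 * half-page buffers via the XOR above, while larger pages advance
	 * the offset by the space the received fragment actually used.
	 */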
1967 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1968 * @rx_ring: rx descriptor ring to transact packets on
1969 * @size: size of buffer to add to skb
1971 * This function will pull an Rx buffer from the ring and synchronize it
1972 * for use by the CPU.
1974 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1975 const unsigned int size)
1977 struct i40e_rx_buffer *rx_buffer;
1979 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1980 prefetchw(rx_buffer->page);
1982 /* we are reusing so sync this buffer for CPU use */
1983 dma_sync_single_range_for_cpu(rx_ring->dev,
1985 rx_buffer->page_offset,
1989 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1990 rx_buffer->pagecnt_bias--;
1996 * i40e_construct_skb - Allocate skb and populate it
1997 * @rx_ring: rx descriptor ring to transact packets on
1998 * @rx_buffer: rx buffer to pull data from
1999 * @xdp: xdp_buff pointing to the data
2001 * This function allocates an skb. It then populates it with the page
2002 * data from the current receive descriptor, taking care to set up the
2005 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2006 struct i40e_rx_buffer *rx_buffer,
2007 struct xdp_buff *xdp)
2009 unsigned int size = xdp->data_end - xdp->data;
2010 #if (PAGE_SIZE < 8192)
2011 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2013 unsigned int truesize = SKB_DATA_ALIGN(size);
2015 unsigned int headlen;
2016 struct sk_buff *skb;
2018 /* prefetch first cache line of first page */
2019 prefetch(xdp->data);
2020 #if L1_CACHE_BYTES < 128
2021 prefetch(xdp->data + L1_CACHE_BYTES);
2023 /* Note, we get here by enabling legacy-rx via:
2025 * ethtool --set-priv-flags <dev> legacy-rx on
2027 * In this mode, we currently get 0 extra XDP headroom as
2028 * opposed to having legacy-rx off, where we process XDP
2029 * packets going to stack via i40e_build_skb(). The latter
2030 * provides us currently with 192 bytes of headroom.
2032 * For i40e_construct_skb() mode it means that the
2033 * xdp->data_meta will always point to xdp->data, since
2034 * the helper cannot expand the head. Should this ever
2035 * change in future for legacy-rx mode on, then lets also
2036 * add xdp->data_meta handling here.
2039 /* allocate a skb to store the frags */
2040 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2042 GFP_ATOMIC | __GFP_NOWARN);
2046 /* Determine available headroom for copy */
2048 if (headlen > I40E_RX_HDR_SIZE)
2049 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2051 /* align pull length to size of long to optimize memcpy performance */
2052 memcpy(__skb_put(skb, headlen), xdp->data,
2053 ALIGN(headlen, sizeof(long)));
2055 /* update all of the pointers */
2058 skb_add_rx_frag(skb, 0, rx_buffer->page,
2059 rx_buffer->page_offset + headlen,
2062 /* buffer is used by skb, update page_offset */
2063 #if (PAGE_SIZE < 8192)
2064 rx_buffer->page_offset ^= truesize;
2066 rx_buffer->page_offset += truesize;
2069 /* buffer is unused, reset bias back to rx_buffer */
2070 rx_buffer->pagecnt_bias++;
2077 * i40e_build_skb - Build skb around an existing buffer
2078 * @rx_ring: Rx descriptor ring to transact packets on
2079 * @rx_buffer: Rx buffer to pull data from
2080 * @xdp: xdp_buff pointing to the data
2082 * This function builds an skb around an existing Rx buffer, taking care
2083 * to set up the skb correctly and avoid any memcpy overhead.
2085 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2086 struct i40e_rx_buffer *rx_buffer,
2087 struct xdp_buff *xdp)
2089 unsigned int metasize = xdp->data - xdp->data_meta;
2090 #if (PAGE_SIZE < 8192)
2091 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2092 #else
2093 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2094 SKB_DATA_ALIGN(xdp->data_end -
2095 xdp->data_hard_start);
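/* build_skb() turns the receive buffer itself into the skb head, so
 * truesize must cover everything the buffer holds: the headroom plus
 * frame (data_hard_start .. data_end) and the struct skb_shared_info
 * that build_skb() places at the end of the buffer.  On PAGE_SIZE < 8192
 * systems the fixed half-page already accounts for both.
 */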
2097 struct sk_buff *skb;
2099 /* Prefetch first cache line of first page. If xdp->data_meta
2100 * is unused, this points exactly as xdp->data, otherwise we
2101 * likely have a consumer accessing first few bytes of meta
2102 * data, and then actual data.
2104 prefetch(xdp->data_meta);
2105 #if L1_CACHE_BYTES < 128
2106 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2108 /* build an skb around the page buffer */
2109 skb = build_skb(xdp->data_hard_start, truesize);
2113 /* update pointers within the skb to store the data */
2114 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2115 __skb_put(skb, xdp->data_end - xdp->data);
2116 if (metasize)
2117 skb_metadata_set(skb, metasize);
2119 /* buffer is used by skb, update page_offset */
2120 #if (PAGE_SIZE < 8192)
2121 rx_buffer->page_offset ^= truesize;
2122 #else
2123 rx_buffer->page_offset += truesize;
2130 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2131 * @rx_ring: rx descriptor ring to transact packets on
2132 * @rx_buffer: rx buffer to pull data from
2134 * This function will clean up the contents of the rx_buffer. It will
2135 * either recycle the buffer or unmap it and free the associated resources.
2137 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2138 struct i40e_rx_buffer *rx_buffer)
2140 if (i40e_can_reuse_rx_page(rx_buffer)) {
2141 /* hand second half of page back to the ring */
2142 i40e_reuse_rx_page(rx_ring, rx_buffer);
2144 /* we are not reusing the buffer so unmap it */
2145 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2146 i40e_rx_pg_size(rx_ring),
2147 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2148 __page_frag_cache_drain(rx_buffer->page,
2149 rx_buffer->pagecnt_bias);
2150 /* clear contents of buffer_info */
2151 rx_buffer->page = NULL;
2156 * i40e_is_non_eop - process handling of non-EOP buffers
2157 * @rx_ring: Rx ring being processed
2158 * @rx_desc: Rx descriptor for current buffer
2159 * @skb: Current socket buffer containing buffer in progress
2161 * This function updates next to clean. If the buffer is an EOP buffer
2162 * this function exits returning false, otherwise it will place the
2163 * sk_buff in the next buffer to be chained and return true indicating
2164 * that this is in fact a non-EOP buffer.
2166 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2167 union i40e_rx_desc *rx_desc,
2168 struct sk_buff *skb)
2170 u32 ntc = rx_ring->next_to_clean + 1;
2172 /* fetch, update, and store next to clean */
2173 ntc = (ntc < rx_ring->count) ? ntc : 0;
2174 rx_ring->next_to_clean = ntc;
2176 prefetch(I40E_RX_DESC(rx_ring, ntc));
2178 /* if we are the last buffer then there is nothing else to do */
2179 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2180 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2183 rx_ring->rx_stats.non_eop_descs++;
2188 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2189 struct i40e_ring *xdp_ring);
2191 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2193 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2195 if (unlikely(!xdpf))
2196 return I40E_XDP_CONSUMED;
2198 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2202 * i40e_run_xdp - run an XDP program
2203 * @rx_ring: Rx ring being processed
2204 * @xdp: XDP buffer containing the frame
2206 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2207 struct xdp_buff *xdp)
2209 int err, result = I40E_XDP_PASS;
2210 struct i40e_ring *xdp_ring;
2211 struct bpf_prog *xdp_prog;
2214 rcu_read_lock();
2215 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2217 if (!xdp_prog)
2218 goto xdp_out;
2220 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2222 act = bpf_prog_run_xdp(xdp_prog, xdp);
2223 switch (act) {
2224 case XDP_PASS:
2225 break;
2226 case XDP_TX:
2227 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2228 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2229 break;
2230 case XDP_REDIRECT:
2231 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2232 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2233 break;
2234 default:
2235 bpf_warn_invalid_xdp_action(act);
2236 /* fall through */
2237 case XDP_ABORTED:
2238 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2239 /* fall through -- handle aborts by dropping packet */
2240 case XDP_DROP:
2241 result = I40E_XDP_CONSUMED;
2242 break;
2243 }
2244 xdp_out:
2245 rcu_read_unlock();
2246 return ERR_PTR(-result);
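/* The XDP verdict (I40E_XDP_PASS/TX/REDIR/CONSUMED) is folded into an
 * ERR_PTR so this function shares a return type with the skb build
 * paths; i40e_clean_rx_irq() recovers it with xdp_res = -PTR_ERR(skb).
 * I40E_XDP_PASS is 0, so a passed frame simply yields a NULL "skb" and
 * falls through to the normal skb construction code.
 */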
2250 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2252 * @rx_buffer: Rx buffer to adjust
2253 * @size: Size of adjustment
2255 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2256 struct i40e_rx_buffer *rx_buffer,
2259 #if (PAGE_SIZE < 8192)
2260 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2262 rx_buffer->page_offset ^= truesize;
2263 #else
2264 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2266 rx_buffer->page_offset += truesize;
2271 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2272 * @xdp_ring: XDP Tx ring
2274 * This function updates the XDP Tx ring tail register.
2276 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2278 /* Force memory writes to complete before letting h/w
2279 * know there are new descriptors to fetch.
2281 wmb();
2282 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2286 * i40e_update_rx_stats - Update Rx ring statistics
2287 * @rx_ring: rx descriptor ring
2288 * @total_rx_bytes: number of bytes received
2289 * @total_rx_packets: number of packets received
2291 * This function updates the Rx ring statistics.
2293 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2294 unsigned int total_rx_bytes,
2295 unsigned int total_rx_packets)
2297 u64_stats_update_begin(&rx_ring->syncp);
2298 rx_ring->stats.packets += total_rx_packets;
2299 rx_ring->stats.bytes += total_rx_bytes;
2300 u64_stats_update_end(&rx_ring->syncp);
2301 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2302 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
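/* The u64_stats_update_begin()/end() pair keeps the 64-bit packet and
 * byte counters readable without tearing on 32-bit machines; readers of
 * ring->stats retry if they race with this writer.  The q_vector totals
 * feed the adaptive ITR calculation in i40e_update_itr().
 */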
2306 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2308 * @xdp_res: Result of the receive batch
2310 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2311 * should be called when a batch of packets has been processed in the
2312 * napi loop.
2314 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2316 if (xdp_res & I40E_XDP_REDIR)
2317 xdp_do_flush_map();
2319 if (xdp_res & I40E_XDP_TX) {
2320 struct i40e_ring *xdp_ring =
2321 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2323 i40e_xdp_ring_update_tail(xdp_ring);
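/* The tail bump is deliberately deferred to the end of the Rx batch:
 * frames queued with XDP_TX during this napi poll become visible to the
 * hardware with a single MMIO write, and frames handed to
 * xdp_do_redirect() are pushed out of the redirect maps by the flush
 * above, instead of paying that cost per frame.
 */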
2328 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2329 * @rx_ring: rx descriptor ring to transact packets on
2330 * @budget: Total limit on number of packets to process
2332 * This function provides a "bounce buffer" approach to Rx interrupt
2333 * processing. The advantage to this is that on systems that have
2334 * expensive overhead for IOMMU access this provides a means of avoiding
2335 * it by maintaining the mapping of the page to the system.
2337 * Returns amount of work completed
2339 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2341 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2342 struct sk_buff *skb = rx_ring->skb;
2343 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2344 unsigned int xdp_xmit = 0;
2345 bool failure = false;
2346 struct xdp_buff xdp;
2348 xdp.rxq = &rx_ring->xdp_rxq;
2350 while (likely(total_rx_packets < (unsigned int)budget)) {
2351 struct i40e_rx_buffer *rx_buffer;
2352 union i40e_rx_desc *rx_desc;
2353 unsigned int size;
2354 u16 vlan_tag;
2355 u8 rx_ptype;
2356 u64 qword;
2358 /* return some buffers to hardware, one at a time is too slow */
2359 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2360 failure = failure ||
2361 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2362 cleaned_count = 0;
2363 }
2365 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2367 /* status_error_len will always be zero for unused descriptors
2368 * because it's cleared in cleanup, and overlaps with hdr_addr
2369 * which is always zero because packet split isn't used; if the
2370 * hardware wrote DD then the length will be non-zero.
2372 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2374 /* This memory barrier is needed to keep us from reading
2375 * any other fields out of the rx_desc until we have
2376 * verified the descriptor has been written back.
2378 dma_rmb();
2380 rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
2381 qword);
2382 if (unlikely(rx_buffer)) {
2383 i40e_reuse_rx_page(rx_ring, rx_buffer);
2388 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2389 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2393 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2394 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2396 /* retrieve a buffer from the ring */
2397 if (!skb) {
2398 xdp.data = page_address(rx_buffer->page) +
2399 rx_buffer->page_offset;
2400 xdp.data_meta = xdp.data;
2401 xdp.data_hard_start = xdp.data -
2402 i40e_rx_offset(rx_ring);
2403 xdp.data_end = xdp.data + size;
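/* The xdp_buff is pointed straight at the DMA buffer: data starts at the
 * current page offset, data_hard_start backs up by the configured Rx
 * headroom so an XDP program may grow the head, and data_end reflects
 * the DD-completed length from the descriptor.  No copy is made before
 * the program runs.
 */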
2405 skb = i40e_run_xdp(rx_ring, &xdp);
2406 }
2408 if (IS_ERR(skb)) {
2409 unsigned int xdp_res = -PTR_ERR(skb);
2411 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2412 xdp_xmit |= xdp_res;
2413 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2414 } else {
2415 rx_buffer->pagecnt_bias++;
2416 }
2417 total_rx_bytes += size;
2418 total_rx_packets++;
2419 } else if (skb) {
2420 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2421 } else if (ring_uses_build_skb(rx_ring)) {
2422 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2423 } else {
2424 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2425 }
2427 /* exit if we failed to retrieve a buffer */
2428 if (!skb) {
2429 rx_ring->rx_stats.alloc_buff_failed++;
2430 rx_buffer->pagecnt_bias++;
2431 break;
2432 }
2434 i40e_put_rx_buffer(rx_ring, rx_buffer);
2435 cleaned_count++;
2437 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2440 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2445 /* probably a little skewed due to removing CRC */
2446 total_rx_bytes += skb->len;
2448 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2449 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2450 I40E_RXD_QW1_PTYPE_SHIFT;
2452 /* populate checksum, VLAN, and protocol */
2453 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2455 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2456 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2458 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2459 i40e_receive_skb(rx_ring, skb, vlan_tag);
2460 skb = NULL;
2462 /* update budget accounting */
2463 total_rx_packets++;
2464 }
2466 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2467 rx_ring->skb = skb;
2469 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2471 /* guarantee a trip back through this routine if there was a failure */
2472 return failure ? budget : (int)total_rx_packets;
2475 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2479 /* We don't bother with setting the CLEARPBA bit as the data sheet
2480 * points out doing so is "meaningless since it was already
2481 * auto-cleared". The auto-clearing happens when the interrupt is
2484 * Hardware errata 28 also indicates that writing to a
2485 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2486 * an event in the PBA anyway so we need to rely on the automask
2487 * to hold pending events for us until the interrupt is re-enabled
2489 * The itr value is reported in microseconds, and the register
2490 * value is recorded in 2 microsecond units. For this reason we
2491 * only need to shift by the interval shift - 1 instead of the
2494 itr &= I40E_ITR_MASK;
2496 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2497 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2498 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
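/* Worked example of the shift trick: for itr = 50 usecs the hardware
 * wants an INTERVAL field of 25 (2 usec units).  Since
 * 50 << (INTERVAL_SHIFT - 1) equals 25 << INTERVAL_SHIFT, shifting the
 * microsecond value by one position less than the field offset performs
 * the divide-by-two for free (ITR values are kept even, so nothing is
 * lost).
 */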
2503 /* a small macro to shorten up some long lines */
2504 #define INTREG I40E_PFINT_DYN_CTLN
2506 /* The act of updating the ITR will cause it to immediately trigger. In order
2507 * to prevent this from throwing off adaptive update statistics we defer the
2508 * update so that it can only happen so often. So after either Tx or Rx are
2509 * updated we make the adaptive scheme wait until either the ITR completely
2510 * expires via the next_update expiration or we have been through at least
2511 * 3 interrupts.
2513 #define ITR_COUNTDOWN_START 3
2516 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2517 * @vsi: the VSI we care about
2518 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2521 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2522 struct i40e_q_vector *q_vector)
2524 struct i40e_hw *hw = &vsi->back->hw;
2527 /* If we don't have MSIX, then we only need to re-enable icr0 */
2528 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2529 i40e_irq_dynamic_enable_icr0(vsi->back);
2533 /* These will do nothing if dynamic updates are not enabled */
2534 i40e_update_itr(q_vector, &q_vector->tx);
2535 i40e_update_itr(q_vector, &q_vector->rx);
2537 /* This block of logic allows us to get away with only updating
2538 * one ITR value with each interrupt. The idea is to perform a
2539 * pseudo-lazy update with the following criteria.
2541 * 1. Rx is given higher priority than Tx if both are in same state
2542 * 2. If we must reduce an ITR, that is given highest priority.
2543 * 3. We then give priority to increasing ITR based on amount.
2545 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2546 /* Rx ITR needs to be reduced, this is highest priority */
2547 intval = i40e_buildreg_itr(I40E_RX_ITR,
2548 q_vector->rx.target_itr);
2549 q_vector->rx.current_itr = q_vector->rx.target_itr;
2550 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2551 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2552 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2553 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2554 /* Tx ITR needs to be reduced, this is second priority
2555 * Tx ITR needs to be increased more than Rx, fourth priority
2557 intval = i40e_buildreg_itr(I40E_TX_ITR,
2558 q_vector->tx.target_itr);
2559 q_vector->tx.current_itr = q_vector->tx.target_itr;
2560 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2561 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2562 /* Rx ITR needs to be increased, third priority */
2563 intval = i40e_buildreg_itr(I40E_RX_ITR,
2564 q_vector->rx.target_itr);
2565 q_vector->rx.current_itr = q_vector->rx.target_itr;
2566 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2568 /* No ITR update, lowest priority */
2569 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2570 if (q_vector->itr_countdown)
2571 q_vector->itr_countdown--;
2574 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2575 wr32(hw, INTREG(q_vector->reg_idx), intval);
2579 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2580 * @napi: napi struct with our devices info in it
2581 * @budget: amount of work driver is allowed to do this pass, in packets
2583 * This function will clean all queues associated with a q_vector.
2585 * Returns the amount of work done
2587 int i40e_napi_poll(struct napi_struct *napi, int budget)
2589 struct i40e_q_vector *q_vector =
2590 container_of(napi, struct i40e_q_vector, napi);
2591 struct i40e_vsi *vsi = q_vector->vsi;
2592 struct i40e_ring *ring;
2593 bool clean_complete = true;
2594 bool arm_wb = false;
2595 int budget_per_ring;
2598 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2599 napi_complete(napi);
2600 return 0;
2601 }
2603 /* Since the actual Tx work is minimal, we can give the Tx a larger
2604 * budget and be more aggressive about cleaning up the Tx descriptors.
2606 i40e_for_each_ring(ring, q_vector->tx) {
2607 bool wd = ring->xsk_umem ?
2608 i40e_clean_xdp_tx_irq(vsi, ring, budget) :
2609 i40e_clean_tx_irq(vsi, ring, budget);
2611 if (!wd) {
2612 clean_complete = false;
2613 continue;
2614 }
2615 arm_wb |= ring->arm_wb;
2616 ring->arm_wb = false;
2619 /* Handle case where we are called by netpoll with a budget of 0 */
2620 if (budget <= 0)
2621 goto tx_only;
2623 /* We attempt to distribute budget to each Rx queue fairly, but don't
2624 * allow the budget to go below 1 because that would exit polling early.
2626 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2628 i40e_for_each_ring(ring, q_vector->rx) {
2629 int cleaned = ring->xsk_umem ?
2630 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2631 i40e_clean_rx_irq(ring, budget_per_ring);
2633 work_done += cleaned;
2634 /* if we clean as many as budgeted, we must not be done */
2635 if (cleaned >= budget_per_ring)
2636 clean_complete = false;
2639 /* If work not completed, return budget and polling will return */
2640 if (!clean_complete) {
2641 int cpu_id = smp_processor_id();
2643 /* It is possible that the interrupt affinity has changed but,
2644 * if the cpu is pegged at 100%, polling will never exit while
2645 * traffic continues and the interrupt will be stuck on this
2646 * cpu. We check to make sure affinity is correct before we
2647 * continue to poll, otherwise we must stop polling so the
2648 * interrupt can move to the correct cpu.
2650 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2651 /* Tell napi that we are done polling */
2652 napi_complete_done(napi, work_done);
2654 /* Force an interrupt */
2655 i40e_force_wb(vsi, q_vector);
2657 /* Return budget-1 so that polling stops */
2658 return budget - 1;
2659 }
2660 tx_only:
2661 if (arm_wb) {
2662 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2663 i40e_enable_wb_on_itr(vsi, q_vector);
2668 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2669 q_vector->arm_wb_state = false;
2671 /* Work is done so exit the polling mode and re-enable the interrupt */
2672 napi_complete_done(napi, work_done);
2674 i40e_update_enable_itr(vsi, q_vector);
2676 return min(work_done, budget - 1);
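/* By convention a NAPI poll routine returns less than the full budget
 * once it has called napi_complete_done(); the min() guards against
 * work_done == budget here.  Returning the full budget (the
 * !clean_complete path above) is what keeps this queue in polling mode.
 */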
2680 * i40e_atr - Add a Flow Director ATR filter
2681 * @tx_ring: ring to add programming descriptor to
2683 * @tx_flags: send tx flags
2685 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2688 struct i40e_filter_program_desc *fdir_desc;
2689 struct i40e_pf *pf = tx_ring->vsi->back;
2691 unsigned char *network;
2693 struct ipv6hdr *ipv6;
2697 u32 flex_ptype, dtype_cmd;
2701 /* make sure ATR is enabled */
2702 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2705 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2708 /* if sampling is disabled do nothing */
2709 if (!tx_ring->atr_sample_rate)
2712 /* Currently only IPv4/IPv6 with TCP is supported */
2713 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2716 /* snag network header to get L4 type and address */
2717 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2718 skb_inner_network_header(skb) : skb_network_header(skb);
2720 /* Note: tx_flags gets modified to reflect inner protocols in
2721 * tx_enable_csum function if encap is enabled.
2723 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2724 /* access ihl as u8 to avoid unaligned access on ia64 */
2725 hlen = (hdr.network[0] & 0x0F) << 2;
2726 l4_proto = hdr.ipv4->protocol;
2728 /* find the start of the innermost ipv6 header */
2729 unsigned int inner_hlen = hdr.network - skb->data;
2730 unsigned int h_offset = inner_hlen;
2732 /* this function updates h_offset to the end of the header */
2734 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2735 /* hlen will contain our best estimate of the tcp header */
2736 hlen = h_offset - inner_hlen;
2739 if (l4_proto != IPPROTO_TCP)
2742 th = (struct tcphdr *)(hdr.network + hlen);
2744 /* Due to lack of space, no more new filters can be programmed */
2745 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2747 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2748 /* HW ATR eviction will take care of removing filters on FIN
2749 * and RST packets.
2751 if (th->fin || th->rst)
2755 tx_ring->atr_count++;
2757 /* sample on all syn/fin/rst packets or once every atr sample rate */
2758 if (!th->fin &&
2759 !th->syn &&
2760 !th->rst &&
2761 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2764 tx_ring->atr_count = 0;
2766 /* grab the next descriptor */
2767 i = tx_ring->next_to_use;
2768 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2771 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2773 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2774 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2775 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2776 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2777 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2778 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2779 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2781 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2783 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2785 dtype_cmd |= (th->fin || th->rst) ?
2786 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2787 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2788 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2789 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2791 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2792 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2794 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2795 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2797 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2798 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2800 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2801 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2802 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2805 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2806 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2807 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2809 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2810 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2812 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2813 fdir_desc->rsvd = cpu_to_le32(0);
2814 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2815 fdir_desc->fd_id = cpu_to_le32(0);
2819 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2821 * @tx_ring: ring to send buffer on
2822 * @flags: the tx flags to be set
2824 * Checks the skb and set up correspondingly several generic transmit flags
2825 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2827 * Returns an error code to indicate the frame should be dropped upon error,
2828 * and otherwise returns 0 to indicate the flags have been set properly.
2830 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2831 struct i40e_ring *tx_ring,
2834 __be16 protocol = skb->protocol;
2837 if (protocol == htons(ETH_P_8021Q) &&
2838 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2839 /* When HW VLAN acceleration is turned off by the user the
2840 * stack sets the protocol to 8021q so that the driver
2841 * can take any steps required to support the SW only
2842 * VLAN handling. In our case the driver doesn't need
2843 * to take any further steps so just set the protocol
2844 * to the encapsulated ethertype.
2846 skb->protocol = vlan_get_protocol(skb);
2850 /* if we have a HW VLAN tag being added, default to the HW one */
2851 if (skb_vlan_tag_present(skb)) {
2852 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2853 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2854 /* else if it is a SW VLAN, check the next protocol and store the tag */
2855 } else if (protocol == htons(ETH_P_8021Q)) {
2856 struct vlan_hdr *vhdr, _vhdr;
2858 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2862 protocol = vhdr->h_vlan_encapsulated_proto;
2863 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2864 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2867 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2870 /* Insert 802.1p priority into VLAN header */
2871 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2872 (skb->priority != TC_PRIO_CONTROL)) {
2873 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2874 tx_flags |= (skb->priority & 0x7) <<
2875 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2876 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2877 struct vlan_ethhdr *vhdr;
2880 rc = skb_cow_head(skb, 0);
2883 vhdr = (struct vlan_ethhdr *)skb->data;
2884 vhdr->h_vlan_TCI = htons(tx_flags >>
2885 I40E_TX_FLAGS_VLAN_SHIFT);
2887 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2897 * i40e_tso - set up the tso context descriptor
2898 * @first: pointer to first Tx buffer for xmit
2899 * @hdr_len: ptr to the size of the packet header
2900 * @cd_type_cmd_tso_mss: Quad Word 1
2902 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2904 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2905 u64 *cd_type_cmd_tso_mss)
2907 struct sk_buff *skb = first->skb;
2908 u64 cd_cmd, cd_tso_len, cd_mss;
2919 u32 paylen, l4_offset;
2920 u16 gso_segs, gso_size;
2923 if (skb->ip_summed != CHECKSUM_PARTIAL)
2926 if (!skb_is_gso(skb))
2929 err = skb_cow_head(skb, 0);
2933 ip.hdr = skb_network_header(skb);
2934 l4.hdr = skb_transport_header(skb);
2936 /* initialize outer IP header fields */
2937 if (ip.v4->version == 4) {
2941 ip.v6->payload_len = 0;
2944 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2948 SKB_GSO_UDP_TUNNEL |
2949 SKB_GSO_UDP_TUNNEL_CSUM)) {
2950 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2951 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2954 /* determine offset of outer transport header */
2955 l4_offset = l4.hdr - skb->data;
2957 /* remove payload length from outer checksum */
2958 paylen = skb->len - l4_offset;
2959 csum_replace_by_diff(&l4.udp->check,
2960 (__force __wsum)htonl(paylen));
2963 /* reset pointers to inner headers */
2964 ip.hdr = skb_inner_network_header(skb);
2965 l4.hdr = skb_inner_transport_header(skb);
2967 /* initialize inner IP header fields */
2968 if (ip.v4->version == 4) {
2972 ip.v6->payload_len = 0;
2976 /* determine offset of inner transport header */
2977 l4_offset = l4.hdr - skb->data;
2979 /* remove payload length from inner checksum */
2980 paylen = skb->len - l4_offset;
2981 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2983 /* compute length of segmentation header */
2984 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2986 /* pull values out of skb_shinfo */
2987 gso_size = skb_shinfo(skb)->gso_size;
2988 gso_segs = skb_shinfo(skb)->gso_segs;
2990 /* update GSO size and bytecount with header size */
2991 first->gso_segs = gso_segs;
2992 first->bytecount += (first->gso_segs - 1) * *hdr_len;
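/* bytecount starts out as skb->len; since every additional wire segment
 * retransmits the headers, (gso_segs - 1) extra copies of the header are
 * added so that BQL and the netdev byte counters match what actually
 * goes on the wire.  For example, 3 segments behind a 66-byte header add
 * 132 bytes.
 */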
2994 /* find the field values */
2995 cd_cmd = I40E_TX_CTX_DESC_TSO;
2996 cd_tso_len = skb->len - *hdr_len;
2998 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2999 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3000 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3005 * i40e_tsyn - set up the tsyn context descriptor
3006 * @tx_ring: ptr to the ring to send
3007 * @skb: ptr to the skb we're sending
3008 * @tx_flags: the collected send information
3009 * @cd_type_cmd_tso_mss: Quad Word 1
3011 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3013 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3014 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3018 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3021 /* Tx timestamps cannot be sampled when doing TSO */
3022 if (tx_flags & I40E_TX_FLAGS_TSO)
3025 /* only timestamp the outbound packet if the user has requested it and
3026 * we are not already transmitting a packet to be timestamped
3028 pf = i40e_netdev_to_pf(tx_ring->netdev);
3029 if (!(pf->flags & I40E_FLAG_PTP))
3030 return 0;
3032 if (pf->ptp_tx &&
3033 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3034 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3035 pf->ptp_tx_start = jiffies;
3036 pf->ptp_tx_skb = skb_get(skb);
3037 } else {
3038 pf->tx_hwtstamp_skipped++;
3042 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3043 I40E_TXD_CTX_QW1_CMD_SHIFT;
3049 * i40e_tx_enable_csum - Enable Tx checksum offloads
3051 * @tx_flags: pointer to Tx flags currently set
3052 * @td_cmd: Tx descriptor command bits to set
3053 * @td_offset: Tx descriptor header offsets to set
3054 * @tx_ring: Tx descriptor ring
3055 * @cd_tunneling: ptr to context desc bits
3057 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3058 u32 *td_cmd, u32 *td_offset,
3059 struct i40e_ring *tx_ring,
3072 unsigned char *exthdr;
3073 u32 offset, cmd = 0;
3077 if (skb->ip_summed != CHECKSUM_PARTIAL)
3080 ip.hdr = skb_network_header(skb);
3081 l4.hdr = skb_transport_header(skb);
3083 /* compute outer L2 header size */
3084 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
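/* The descriptor length fields use hardware units rather than bytes:
 * MACLEN is counted in 2-byte words (a plain 14-byte Ethernet header
 * becomes 7), while the IPLEN and L4LEN fields filled in below are
 * counted in 4-byte dwords, which is why the header sizes are divided
 * by 2 and 4 before being shifted into place.
 */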
3086 if (skb->encapsulation) {
3088 /* define outer network header type */
3089 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3090 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3091 I40E_TX_CTX_EXT_IP_IPV4 :
3092 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3094 l4_proto = ip.v4->protocol;
3095 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3096 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3098 exthdr = ip.hdr + sizeof(*ip.v6);
3099 l4_proto = ip.v6->nexthdr;
3100 if (l4.hdr != exthdr)
3101 ipv6_skip_exthdr(skb, exthdr - skb->data,
3102 &l4_proto, &frag_off);
3105 /* define outer transport */
3108 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3109 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3112 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3113 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3117 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3118 l4.hdr = skb_inner_network_header(skb);
3121 if (*tx_flags & I40E_TX_FLAGS_TSO)
3124 skb_checksum_help(skb);
3128 /* compute outer L3 header size */
3129 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3130 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3132 /* switch IP header pointer from outer to inner header */
3133 ip.hdr = skb_inner_network_header(skb);
3135 /* compute tunnel header size */
3136 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3137 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3139 /* indicate if we need to offload outer UDP header */
3140 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3141 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3142 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3143 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3145 /* record tunnel offload values */
3146 *cd_tunneling |= tunnel;
3148 /* switch L4 header pointer from outer to inner */
3149 l4.hdr = skb_inner_transport_header(skb);
3152 /* reset type as we transition from outer to inner headers */
3153 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3154 if (ip.v4->version == 4)
3155 *tx_flags |= I40E_TX_FLAGS_IPV4;
3156 if (ip.v6->version == 6)
3157 *tx_flags |= I40E_TX_FLAGS_IPV6;
3160 /* Enable IP checksum offloads */
3161 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3162 l4_proto = ip.v4->protocol;
3163 /* the stack computes the IP header already, the only time we
3164 * need the hardware to recompute it is in the case of TSO.
3166 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3167 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3168 I40E_TX_DESC_CMD_IIPT_IPV4;
3169 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3170 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3172 exthdr = ip.hdr + sizeof(*ip.v6);
3173 l4_proto = ip.v6->nexthdr;
3174 if (l4.hdr != exthdr)
3175 ipv6_skip_exthdr(skb, exthdr - skb->data,
3176 &l4_proto, &frag_off);
3179 /* compute inner L3 header size */
3180 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3182 /* Enable L4 checksum offloads */
3185 /* enable checksum offloads */
3186 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3187 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3190 /* enable SCTP checksum offload */
3191 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3192 offset |= (sizeof(struct sctphdr) >> 2) <<
3193 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3196 /* enable UDP checksum offload */
3197 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3198 offset |= (sizeof(struct udphdr) >> 2) <<
3199 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3202 if (*tx_flags & I40E_TX_FLAGS_TSO)
3204 skb_checksum_help(skb);
3209 *td_offset |= offset;
3215 * i40e_create_tx_ctx - Build the Tx context descriptor
3216 * @tx_ring: ring to create the descriptor on
3217 * @cd_type_cmd_tso_mss: Quad Word 1
3218 * @cd_tunneling: Quad Word 0 - bits 0-31
3219 * @cd_l2tag2: Quad Word 0 - bits 32-63
3221 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3222 const u64 cd_type_cmd_tso_mss,
3223 const u32 cd_tunneling, const u32 cd_l2tag2)
3225 struct i40e_tx_context_desc *context_desc;
3226 int i = tx_ring->next_to_use;
3228 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3229 !cd_tunneling && !cd_l2tag2)
3232 /* grab the next descriptor */
3233 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3236 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3238 /* cpu_to_le32 and assign to struct fields */
3239 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3240 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3241 context_desc->rsvd = cpu_to_le16(0);
3242 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3246 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3247 * @tx_ring: the ring to be checked
3248 * @size: the size buffer we want to assure is available
3250 * Returns -EBUSY if a stop is needed, else 0
3252 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3254 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3255 /* Memory barrier before checking head and tail */
3258 /* Check again in a case another CPU has just made room available. */
3259 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3262 /* A reprieve! - use start_queue because it doesn't call schedule */
3263 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3264 ++tx_ring->tx_stats.restart_queue;
3269 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3272 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3273 * and so we need to figure out the cases where we need to linearize the skb.
3275 * For TSO we need to count the TSO header and segment payload separately.
3276 * As such we need to check cases where we have 7 fragments or more as we
3277 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3278 * the segment payload in the first descriptor, and another 7 for the
3279 * frags.
3281 bool __i40e_chk_linearize(struct sk_buff *skb)
3283 const struct skb_frag_struct *frag, *stale;
3286 /* no need to check if number of frags is less than 7 */
3287 nr_frags = skb_shinfo(skb)->nr_frags;
3288 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3291 /* We need to walk through the list and validate that each group
3292 * of 6 fragments totals at least gso_size.
3294 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3295 frag = &skb_shinfo(skb)->frags[0];
3297 /* Initialize size to the negative value of gso_size minus 1. We
3298 * use this as the worst case scenario in which the frag ahead
3299 * of us only provides one byte which is why we are limited to 6
3300 * descriptors for a single transmit as the header and previous
3301 * fragment are already consuming 2 descriptors.
3303 sum = 1 - skb_shinfo(skb)->gso_size;
3305 /* Add size of frags 0 through 4 to create our initial sum */
3306 sum += skb_frag_size(frag++);
3307 sum += skb_frag_size(frag++);
3308 sum += skb_frag_size(frag++);
3309 sum += skb_frag_size(frag++);
3310 sum += skb_frag_size(frag++);
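/* Worked example: with gso_size = 3000 and ten 400-byte frags the sum is
 * 1 - 3000 + 5 * 400 = -999 after the five adds above; the next fragment
 * only brings it to -599, so the check below sees a negative sum and the
 * skb gets linearized.  Only if every window of six consecutive frags
 * carries at least gso_size bytes does the sum stay positive all the way
 * through.
 */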
3312 /* Walk through fragments adding latest fragment, testing it, and
3313 * then removing stale fragments from the sum.
3315 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3316 int stale_size = skb_frag_size(stale);
3318 sum += skb_frag_size(frag++);
3320 /* The stale fragment may present us with a smaller
3321 * descriptor than the actual fragment size. To account
3322 * for that we need to remove all the data on the front and
3323 * figure out what the remainder would be in the last
3324 * descriptor associated with the fragment.
3326 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3327 int align_pad = -(stale->page_offset) &
3328 (I40E_MAX_READ_REQ_SIZE - 1);
3331 stale_size -= align_pad;
3334 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3335 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3336 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3339 /* if sum is negative we failed to make sufficient progress */
3353 * i40e_tx_map - Build the Tx descriptor
3354 * @tx_ring: ring to send buffer on
3356 * @first: first buffer info buffer to use
3357 * @tx_flags: collected send information
3358 * @hdr_len: size of the packet header
3359 * @td_cmd: the command field in the descriptor
3360 * @td_offset: offset for checksum or crc
3362 * Returns 0 on success, -1 on failure to DMA
3364 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3365 struct i40e_tx_buffer *first, u32 tx_flags,
3366 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3368 unsigned int data_len = skb->data_len;
3369 unsigned int size = skb_headlen(skb);
3370 struct skb_frag_struct *frag;
3371 struct i40e_tx_buffer *tx_bi;
3372 struct i40e_tx_desc *tx_desc;
3373 u16 i = tx_ring->next_to_use;
3378 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3379 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3380 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3381 I40E_TX_FLAGS_VLAN_SHIFT;
3384 first->tx_flags = tx_flags;
3386 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3388 tx_desc = I40E_TX_DESC(tx_ring, i);
3391 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3392 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3394 if (dma_mapping_error(tx_ring->dev, dma))
3397 /* record length, and DMA address */
3398 dma_unmap_len_set(tx_bi, len, size);
3399 dma_unmap_addr_set(tx_bi, dma, dma);
3401 /* align size to end of page */
3402 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3403 tx_desc->buffer_addr = cpu_to_le64(dma);
3405 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3406 tx_desc->cmd_type_offset_bsz =
3407 build_ctob(td_cmd, td_offset,
3414 if (i == tx_ring->count) {
3415 tx_desc = I40E_TX_DESC(tx_ring, 0);
3422 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3423 tx_desc->buffer_addr = cpu_to_le64(dma);
3426 if (likely(!data_len))
3429 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3436 if (i == tx_ring->count) {
3437 tx_desc = I40E_TX_DESC(tx_ring, 0);
3441 size = skb_frag_size(frag);
3444 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3447 tx_bi = &tx_ring->tx_bi[i];
3450 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3453 if (i == tx_ring->count)
3456 tx_ring->next_to_use = i;
3458 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3460 /* write last descriptor with EOP bit */
3461 td_cmd |= I40E_TX_DESC_CMD_EOP;
3463 /* We OR these values together to check both against 4 (WB_STRIDE)
3464 * below. This is safe since we don't re-use desc_count afterwards.
3466 desc_count |= ++tx_ring->packet_stride;
3468 if (desc_count >= WB_STRIDE) {
3469 /* write last descriptor with RS bit set */
3470 td_cmd |= I40E_TX_DESC_CMD_RS;
3471 tx_ring->packet_stride = 0;
3474 tx_desc->cmd_type_offset_bsz =
3475 build_ctob(td_cmd, td_offset, size, td_tag);
3477 /* Force memory writes to complete before letting h/w know there
3478 * are new descriptors to fetch.
3480 * We also use this memory barrier to make certain all of the
3481 * status bits have been updated before next_to_watch is written.
3485 /* set next_to_watch value indicating a packet is present */
3486 first->next_to_watch = tx_desc;
3488 /* notify HW of packet */
3489 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3490 writel(i, tx_ring->tail);
3492 /* we need this if more than one processor can write to our tail
3493 * at a time, it synchronizes IO on IA64/Altix systems
3501 dev_info(tx_ring->dev, "TX DMA map failed\n");
3503 /* clear dma mappings for failed tx_bi map */
3505 tx_bi = &tx_ring->tx_bi[i];
3506 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3514 tx_ring->next_to_use = i;
3520 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3521 * @xdp: data to transmit
3522 * @xdp_ring: XDP Tx ring
3524 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3525 struct i40e_ring *xdp_ring)
3527 u16 i = xdp_ring->next_to_use;
3528 struct i40e_tx_buffer *tx_bi;
3529 struct i40e_tx_desc *tx_desc;
3530 u32 size = xdpf->len;
3533 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3534 xdp_ring->tx_stats.tx_busy++;
3535 return I40E_XDP_CONSUMED;
3538 dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
3539 if (dma_mapping_error(xdp_ring->dev, dma))
3540 return I40E_XDP_CONSUMED;
3542 tx_bi = &xdp_ring->tx_bi[i];
3543 tx_bi->bytecount = size;
3544 tx_bi->gso_segs = 1;
3547 /* record length, and DMA address */
3548 dma_unmap_len_set(tx_bi, len, size);
3549 dma_unmap_addr_set(tx_bi, dma, dma);
3551 tx_desc = I40E_TX_DESC(xdp_ring, i);
3552 tx_desc->buffer_addr = cpu_to_le64(dma);
3553 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3554 | I40E_TXD_CMD,
3555 0, size, 0);
3557 /* Make certain all of the status bits have been updated
3558 * before next_to_watch is written.
3560 smp_wmb();
3562 i++;
3563 if (i == xdp_ring->count)
3564 i = 0;
3566 tx_bi->next_to_watch = tx_desc;
3567 xdp_ring->next_to_use = i;
3569 return I40E_XDP_TX;
3573 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3575 * @tx_ring: ring to send buffer on
3577 * Returns NETDEV_TX_OK if sent, else an error code
3579 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3580 struct i40e_ring *tx_ring)
3582 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3583 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3584 struct i40e_tx_buffer *first;
3593 /* prefetch the data, we'll need it later */
3594 prefetch(skb->data);
3596 i40e_trace(xmit_frame_ring, skb, tx_ring);
3598 count = i40e_xmit_descriptor_count(skb);
3599 if (i40e_chk_linearize(skb, count)) {
3600 if (__skb_linearize(skb)) {
3601 dev_kfree_skb_any(skb);
3602 return NETDEV_TX_OK;
3604 count = i40e_txd_use_count(skb->len);
3605 tx_ring->tx_stats.tx_linearize++;
3608 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3609 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3610 * + 4 desc gap to avoid the cache line where head is,
3611 * + 1 desc for context descriptor,
3612 * otherwise try next time
3614 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3615 tx_ring->tx_stats.tx_busy++;
3616 return NETDEV_TX_BUSY;
3619 /* record the location of the first descriptor for this packet */
3620 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3622 first->bytecount = skb->len;
3623 first->gso_segs = 1;
3625 /* prepare the xmit flags */
3626 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3629 /* obtain protocol of skb */
3630 protocol = vlan_get_protocol(skb);
3632 /* setup IPv4/IPv6 offloads */
3633 if (protocol == htons(ETH_P_IP))
3634 tx_flags |= I40E_TX_FLAGS_IPV4;
3635 else if (protocol == htons(ETH_P_IPV6))
3636 tx_flags |= I40E_TX_FLAGS_IPV6;
3638 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3643 tx_flags |= I40E_TX_FLAGS_TSO;
3645 /* Always offload the checksum, since it's in the data descriptor */
3646 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3647 tx_ring, &cd_tunneling);
3651 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3654 tx_flags |= I40E_TX_FLAGS_TSYN;
3656 skb_tx_timestamp(skb);
3658 /* always enable CRC insertion offload */
3659 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3661 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3662 cd_tunneling, cd_l2tag2);
3664 /* Add Flow Director ATR if it's enabled.
3666 * NOTE: this must always be directly before the data descriptor.
3668 i40e_atr(tx_ring, skb, tx_flags);
3670 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3672 goto cleanup_tx_tstamp;
3674 return NETDEV_TX_OK;
3677 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3678 dev_kfree_skb_any(first->skb);
3681 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3682 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3684 dev_kfree_skb_any(pf->ptp_tx_skb);
3685 pf->ptp_tx_skb = NULL;
3686 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3689 return NETDEV_TX_OK;
3693 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3695 * @netdev: network interface device structure
3697 * Returns NETDEV_TX_OK if sent, else an error code
3699 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3701 struct i40e_netdev_priv *np = netdev_priv(netdev);
3702 struct i40e_vsi *vsi = np->vsi;
3703 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3705 /* hardware can't handle really short frames, hardware padding works
3706 * beyond this point
3708 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3709 return NETDEV_TX_OK;
3711 return i40e_xmit_frame_ring(skb, tx_ring);
3715 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3719 * Returns number of frames successfully sent. Frames that fail are
3720 * freed via the XDP return API.
3722 * For error cases, a negative errno code is returned and no frames
3723 * are transmitted (caller must handle freeing frames).
3725 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3728 struct i40e_netdev_priv *np = netdev_priv(dev);
3729 unsigned int queue_index = smp_processor_id();
3730 struct i40e_vsi *vsi = np->vsi;
3731 struct i40e_ring *xdp_ring;
3735 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3738 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3741 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3744 xdp_ring = vsi->xdp_rings[queue_index];
3746 for (i = 0; i < n; i++) {
3747 struct xdp_frame *xdpf = frames[i];
3750 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3751 if (err != I40E_XDP_TX) {
3752 xdp_return_frame_rx_napi(xdpf);
3757 if (unlikely(flags & XDP_XMIT_FLUSH))
3758 i40e_xdp_ring_update_tail(xdp_ring);
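/* Rough usage sketch (illustrative only, not part of this driver): the
 * net core invokes this as dev->netdev_ops->ndo_xdp_xmit() when an XDP
 * program on some interface redirects frames to this one, e.g.
 *
 *	sent = dev->netdev_ops->ndo_xdp_xmit(dev, batch, frames,
 *					     XDP_XMIT_FLUSH);
 *
 * where 'batch' and 'frames' stand for whatever the redirect/devmap code
 * has accumulated; XDP_XMIT_FLUSH requests the tail bump above so the
 * queued frames are pushed to the hardware immediately.
 */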