// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
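
/* AF_XDP zero-copy support for ixgbe: UMEM registration and DMA mapping,
 * the zero-copy Rx/Tx fast paths, and the ring cleanup helpers used when a
 * socket or ring is torn down.
 */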
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *ring)
{
        bool xdp_on = READ_ONCE(adapter->xdp_prog);
        int qid = ring->ring_idx;

        if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
                return NULL;

        return xdp_get_umem_from_qid(adapter->netdev, qid);
}
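
/* DMA map every page backing the UMEM so the device can access it; on a
 * mapping failure, unwind the pages mapped so far.
 */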
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
                                  struct xdp_umem *umem)
{
        struct device *dev = &adapter->pdev->dev;
        unsigned int i, j;
        dma_addr_t dma;

        for (i = 0; i < umem->npgs; i++) {
                dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
                if (dma_mapping_error(dev, dma))
                        goto out_unmap;

                umem->pages[i].dma = dma;
        }

        return 0;

out_unmap:
        for (j = 0; j < i; j++) {
                dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
                umem->pages[j].dma = 0;
        }

        return -1;
}
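
/* Release the DMA mappings created by ixgbe_xsk_umem_dma_map(). */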
static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
                                     struct xdp_umem *umem)
{
        struct device *dev = &adapter->pdev->dev;
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

                umem->pages[i].dma = 0;
        }
}
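
/* Enable zero-copy on queue pair @qid: validate the queue id, set up the
 * fill-queue reuse buffer, DMA map the UMEM, and bounce the ring if the
 * interface is running so the AF_XDP socket starts receiving.
 */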
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
                                 struct xdp_umem *umem,
                                 u16 qid)
{
        struct net_device *netdev = adapter->netdev;
        struct xdp_umem_fq_reuse *reuseq;
        bool if_running;
        int err;

        if (qid >= adapter->num_rx_queues)
                return -EINVAL;

        if (qid >= netdev->real_num_rx_queues ||
            qid >= netdev->real_num_tx_queues)
                return -EINVAL;

        reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
        if (!reuseq)
                return -ENOMEM;

        xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

        err = ixgbe_xsk_umem_dma_map(adapter, umem);
        if (err)
                return err;

        if_running = netif_running(adapter->netdev) &&
                     ixgbe_enabled_xdp_adapter(adapter);

        if (if_running)
                ixgbe_txrx_ring_disable(adapter, qid);

        set_bit(qid, adapter->af_xdp_zc_qps);

        if (if_running) {
                ixgbe_txrx_ring_enable(adapter, qid);

                /* Kick start the NAPI context so that receiving will start */
                err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
                if (err)
                        return err;
        }

        return 0;
}
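
/* Disable zero-copy on queue pair @qid and release the UMEM DMA mappings. */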
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
        struct xdp_umem *umem;
        bool if_running;

        umem = xdp_get_umem_from_qid(adapter->netdev, qid);
        if (!umem)
                return -EINVAL;

        if_running = netif_running(adapter->netdev) &&
                     ixgbe_enabled_xdp_adapter(adapter);

        if (if_running)
                ixgbe_txrx_ring_disable(adapter, qid);

        clear_bit(qid, adapter->af_xdp_zc_qps);
        ixgbe_xsk_umem_dma_unmap(adapter, umem);

        if (if_running)
                ixgbe_txrx_ring_enable(adapter, qid);

        return 0;
}
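
/* XDP_SETUP_XSK_UMEM entry point from the driver's ndo_bpf handler: a
 * non-NULL umem enables zero-copy on the queue, NULL disables it.
 */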
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
                         u16 qid)
{
        return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
                ixgbe_xsk_umem_disable(adapter, qid);
}
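
/* Run the ring's XDP program on a received zero-copy buffer and translate
 * the verdict into an IXGBE_XDP_{PASS,CONSUMED,TX,REDIR} result for the Rx
 * cleanup loop.
 */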
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
                            struct xdp_buff *xdp)
{
        int err, result = IXGBE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct xdp_frame *xdpf;
        u32 act;

        rcu_read_lock();
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        act = bpf_prog_run_xdp(xdp_prog, xdp);
        xdp->handle += xdp->data - xdp->data_hard_start;
        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdpf = convert_to_xdp_frame(xdp);
                if (unlikely(!xdpf)) {
                        result = IXGBE_XDP_CONSUMED;
                        break;
                }
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                /* fallthrough */
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                /* fallthrough -- handle aborts by dropping packet */
        case XDP_DROP:
                result = IXGBE_XDP_CONSUMED;
                break;
        }
        rcu_read_unlock();
        return result;
}
static struct
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
                                        unsigned int size)
{
        struct ixgbe_rx_buffer *bi;

        bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev, bi->dma, 0,
                                      size, DMA_BIDIRECTIONAL);
        return bi;
}
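
/* Recycle an Rx buffer into the next_to_alloc slot, masking off the
 * in-chunk offset and re-applying the headroom.
 */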
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
                                     struct ixgbe_rx_buffer *obi)
{
        unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
        u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
        u16 nta = rx_ring->next_to_alloc;
        struct ixgbe_rx_buffer *nbi;

        nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        nbi->dma = obi->dma & mask;
        nbi->dma += hr;

        nbi->addr = (void *)((unsigned long)obi->addr & mask);
        nbi->addr += hr;

        nbi->handle = obi->handle & mask;
        nbi->handle += rx_ring->xsk_umem->headroom;
}
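
/* Buffer-free callback for the MEM_TYPE_ZERO_COPY memory model: called when
 * the stack releases a frame that was passed up with XDP_PASS, so the chunk
 * can be recycled into the Rx ring.
 */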
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
        struct ixgbe_rx_buffer *bi;
        struct ixgbe_ring *rx_ring;
        u64 hr, mask;
        u16 nta;

        rx_ring = container_of(alloc, struct ixgbe_ring, zca);
        hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
        mask = rx_ring->xsk_umem->chunk_mask;

        nta = rx_ring->next_to_alloc;
        bi = &rx_ring->rx_buffer_info[nta];

        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        handle &= mask;

        bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
        bi->addr += hr;

        bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}
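
/* Fast-path allocation: take the next address straight from the fill queue
 * (no reuse-queue fallback).
 */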
static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
                                  struct ixgbe_rx_buffer *bi)
{
        struct xdp_umem *umem = rx_ring->xsk_umem;
        void *addr = bi->addr;
        u64 handle, hr;

        if (addr)
                return true;

        if (!xsk_umem_peek_addr(umem, &handle)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        hr = umem->headroom + XDP_PACKET_HEADROOM;

        bi->dma = xdp_umem_get_dma(umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(umem, handle);
        bi->addr += hr;

        bi->handle = handle + umem->headroom;

        xsk_umem_discard_addr(umem);
        return true;
}
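
/* Slow-path allocation: the *_rq helpers also consume addresses parked in
 * the fill-queue reuse buffer.
 */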
static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
                                       struct ixgbe_rx_buffer *bi)
{
        struct xdp_umem *umem = rx_ring->xsk_umem;
        u64 handle, hr;

        if (!xsk_umem_peek_addr_rq(umem, &handle)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        handle &= rx_ring->xsk_umem->chunk_mask;

        hr = umem->headroom + XDP_PACKET_HEADROOM;

        bi->dma = xdp_umem_get_dma(umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(umem, handle);
        bi->addr += hr;

        bi->handle = handle + umem->headroom;

        xsk_umem_discard_addr_rq(umem);
        return true;
}
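
/* Fill up to @cleaned_count Rx descriptors with the supplied allocator and
 * bump the tail register; returns false if the allocator ran dry.
 */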
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
                            bool alloc(struct ixgbe_ring *rx_ring,
                                       struct ixgbe_rx_buffer *bi))
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
        bool ok = true;

        /* nothing to do */
        if (!cleaned_count)
                return true;

        rx_desc = IXGBE_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!alloc(rx_ring, bi)) {
                        ok = false;
                        break;
                }

                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                 bi->page_offset,
                                                 rx_ring->rx_buf_len,
                                                 DMA_BIDIRECTIONAL);

                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBE_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the length for the next_to_use descriptor */
                rx_desc->wb.upper.length = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, rx_ring->tail);
        }

        return ok;
}
void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
        __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
                                    ixgbe_alloc_buffer_slow_zc);
}
static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
                                           u16 count)
{
        return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
                                           ixgbe_alloc_buffer_zc);
}
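
/* XDP_PASS path: copy the frame out of the UMEM into a freshly allocated
 * skb so the zero-copy buffer can be recycled immediately.
 */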
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
                                              struct ixgbe_rx_buffer *bi,
                                              struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;

        /* allocate a skb to store the frags */
        skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
                               xdp->data_end - xdp->data_hard_start,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
        return skb;
}
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}
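
/* Zero-copy Rx poll loop: refill descriptors, run XDP on each completed
 * frame, and either recycle the buffer, forward it (TX/REDIRECT), or build
 * an skb for the stack.
 */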
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
                          struct ixgbe_ring *rx_ring,
                          const int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
        unsigned int xdp_res, xdp_xmit = 0;
        bool failure = false;
        struct sk_buff *skb;
        struct xdp_buff xdp;

        xdp.rxq = &rx_ring->xdp_rxq;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *bi;
                unsigned int size;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        failure = failure ||
                                  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
                                                                  cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
                size = le16_to_cpu(rx_desc->wb.upper.length);
                if (!size)
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * descriptor has been written back
                 */
                dma_rmb();

                bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

                if (unlikely(!ixgbe_test_staterr(rx_desc,
                                                 IXGBE_RXD_STAT_EOP))) {
                        struct ixgbe_rx_buffer *next_bi;

                        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        ixgbe_inc_ntc(rx_ring);
                        next_bi =
                                &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
                        next_bi->skb = ERR_PTR(-EINVAL);
                        continue;
                }

                if (unlikely(bi->skb)) {
                        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        ixgbe_inc_ntc(rx_ring);
                        continue;
                }

                xdp.data = bi->addr;
                xdp.data_meta = xdp.data;
                xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
                xdp.data_end = xdp.data + size;
                xdp.handle = bi->handle;

                xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

                if (xdp_res) {
                        if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
                                xdp_xmit |= xdp_res;
                                bi->addr = NULL;
                                bi->skb = NULL;
                        } else {
                                ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        }
                        total_rx_packets++;
                        total_rx_bytes += size;

                        cleaned_count++;
                        ixgbe_inc_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
                if (!skb) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        break;
                }

                cleaned_count++;
                ixgbe_inc_ntc(rx_ring);

                if (eth_skb_pad(skb))
                        continue;

                total_rx_bytes += skb->len;
                total_rx_packets++;

                ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
                ixgbe_rx_skb(q_vector, skb);
        }

        if (xdp_xmit & IXGBE_XDP_REDIR)
                xdp_do_flush_map();

        if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
        }

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        return failure ? budget : (int)total_rx_packets;
}
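
/* Return all outstanding Rx buffers to the UMEM fill-queue reuse buffer
 * when the ring is torn down.
 */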
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
        u16 i = rx_ring->next_to_clean;
        struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

        while (i != rx_ring->next_to_alloc) {
                xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
                i++;
                bi++;
                if (i == rx_ring->count) {
                        i = 0;
                        bi = rx_ring->rx_buffer_info;
                }
        }
}
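
/* Send up to @budget frames from the AF_XDP Tx ring on the hardware XDP
 * ring.
 */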
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        struct ixgbe_tx_buffer *tx_bi;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;
        u32 cmd_type;

        while (budget-- > 0) {
                if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
                    !netif_carrier_ok(xdp_ring->netdev)) {
                        work_done = false;
                        break;
                }

                if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
                        break;

                dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

                dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
                                           DMA_BIDIRECTIONAL);

                tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
                tx_bi->bytecount = desc.len;
                tx_bi->xdpf = NULL;
                tx_bi->gso_segs = 1;

                tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->read.buffer_addr = cpu_to_le64(dma);

                /* put descriptor type bits */
                cmd_type = IXGBE_ADVTXD_DTYP_DATA |
                           IXGBE_ADVTXD_DCMD_DEXT |
                           IXGBE_ADVTXD_DCMD_IFCS;
                cmd_type |= desc.len | IXGBE_TXD_CMD;
                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
                tx_desc->read.olinfo_status =
                        cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

        if (tx_desc) {
                ixgbe_xdp_ring_update_tail(xdp_ring);
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
        }

        return !!budget && work_done;
}
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
                                      struct ixgbe_tx_buffer *tx_bi)
{
        xdp_return_frame(tx_bi->xdpf);
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_bi, dma),
                         dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_bi, len, 0);
}
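
/* Tx completion for an XDP ring backed by an AF_XDP UMEM: redirected frames
 * (tx_bi->xdpf set) are unmapped and returned, zero-copy frames are
 * completed back to the socket, then more frames are sent via
 * ixgbe_xmit_zc().
 */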
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
                            struct ixgbe_ring *tx_ring, int napi_budget)
{
        unsigned int total_packets = 0, total_bytes = 0;
        u32 i = tx_ring->next_to_clean, xsk_frames = 0;
        unsigned int budget = q_vector->tx.work_limit;
        struct xdp_umem *umem = tx_ring->xsk_umem;
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_bi;
        bool xmit_done;

        tx_bi = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBE_TX_DESC(tx_ring, i);

        do {
                if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                total_bytes += tx_bi->bytecount;
                total_packets += tx_bi->gso_segs;

                if (tx_bi->xdpf)
                        ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
                else
                        xsk_frames++;

                tx_bi->xdpf = NULL;

                tx_bi++;
                tx_desc++;
                i++;
                if (unlikely(i == tx_ring->count)) {
                        i = 0;
                        tx_bi = tx_ring->tx_buffer_info;
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        tx_ring->next_to_clean = i;

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);

        xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
        return budget > 0 && xmit_done;
}
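
/* Kick the NAPI context of queue @qid so pending AF_XDP Tx descriptors are
 * processed; used by the socket wakeup path and when a UMEM is enabled.
 */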
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_ring *ring;

        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return -ENETDOWN;

        if (!READ_ONCE(adapter->xdp_prog))
                return -ENXIO;

        if (qid >= adapter->num_xdp_queues)
                return -ENXIO;

        if (!adapter->xdp_ring[qid]->xsk_umem)
                return -ENXIO;

        ring = adapter->xdp_ring[qid];
        if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
                u64 eics = BIT_ULL(ring->q_vector->v_idx);

                ixgbe_irq_rearm_queues(adapter, eics);
        }

        return 0;
}
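
/* Drop any Tx frames still in flight when the XDP ring is freed, completing
 * zero-copy descriptors back to the UMEM.
 */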
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
        struct xdp_umem *umem = tx_ring->xsk_umem;
        struct ixgbe_tx_buffer *tx_bi;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                tx_bi = &tx_ring->tx_buffer_info[ntc];

                if (tx_bi->xdpf)
                        ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
                else
                        xsk_frames++;

                tx_bi->xdpf = NULL;

                ntc++;
                if (ntc == tx_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);
}