// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
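
/* Look up the umem bound to @ring's queue. Returns NULL when XDP is
 * disabled, no umem array has been allocated, or no umem is registered
 * for the ring's queue id.
 */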
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return NULL;

	return adapter->xsk_umems[qid];
}

static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
{
	if (adapter->xsk_umems)
		return 0;

	adapter->num_xsk_umems_used = 0;
	adapter->num_xsk_umems = adapter->num_rx_queues;
	adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
				     sizeof(*adapter->xsk_umems),
				     GFP_KERNEL);
	if (!adapter->xsk_umems) {
		adapter->num_xsk_umems = 0;
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
			      struct xdp_umem *umem,
			      u16 qid)
{
	int err;

	err = ixgbe_alloc_xsk_umems(adapter);
	if (err)
		return err;

	adapter->xsk_umems[qid] = umem;
	adapter->num_xsk_umems_used++;

	return 0;
}

static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
{
	adapter->xsk_umems[qid] = NULL;
	adapter->num_xsk_umems_used--;

	/* free the array once the last umem has been removed */
	if (adapter->num_xsk_umems_used == 0) {
		kfree(adapter->xsk_umems);
		adapter->xsk_umems = NULL;
		adapter->num_xsk_umems = 0;
	}
}
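
/* DMA-map every page of the umem so the device can DMA straight into
 * user-space buffers; the mappings are torn down again in
 * ixgbe_xsk_umem_dma_unmap().
 */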
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
				  struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i, j;
	dma_addr_t dma;

	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* unwind only the pages mapped so far */
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -ENOMEM;
}

static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
				     struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}
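
/* Bind a umem to an Rx queue: validate the queue id, hand a recycle
 * queue to the umem, DMA-map its pages, and bounce the ring if the
 * interface is running so the new buffers get picked up.
 */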
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
				 struct xdp_umem *umem,
				 u16 qid)
{
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (adapter->xsk_umems) {
		if (qid >= adapter->num_xsk_umems)
			return -EINVAL;
		if (adapter->xsk_umems[qid])
			return -EBUSY;
	}

	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = ixgbe_xsk_umem_dma_map(adapter, umem);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	err = ixgbe_add_xsk_umem(adapter, umem, qid);
	if (err)
		return err;

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
		if (err)
			return err;
	}

	return 0;
}

static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	bool if_running;

	if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
	ixgbe_remove_xsk_umem(adapter, qid);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
			 u16 qid)
{
	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (adapter->xsk_umems) {
		if (qid >= adapter->num_xsk_umems)
			return -EINVAL;
		*umem = adapter->xsk_umems[qid];
		return 0;
	}

	*umem = NULL;
	return 0;
}

int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		ixgbe_xsk_umem_disable(adapter, qid);
}
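
/* Run the attached XDP program on one zero-copy buffer and translate
 * its verdict into IXGBE_XDP_{PASS,CONSUMED,TX,REDIR}. The umem handle
 * is advanced by the program's headroom adjustment so it stays in sync
 * with xdp->data.
 */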
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

static struct
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
					unsigned int size)
{
	struct ixgbe_rx_buffer *bi;

	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}
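
/* Recycle an Rx buffer into the next_to_alloc slot; the chunk mask
 * strips the old intra-chunk offset before the headroom is re-applied.
 */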
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
				     struct ixgbe_rx_buffer *obi)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbe_rx_buffer *nbi;

	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	nbi->dma = obi->dma & mask;
	nbi->dma += hr;

	nbi->addr = (void *)((unsigned long)obi->addr & mask);
	nbi->addr += hr;

	nbi->handle = obi->handle & mask;
	nbi->handle += rx_ring->xsk_umem->headroom;

	obi->addr = NULL;
	obi->skb = NULL;
}
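
/* Zero-copy allocator callback, invoked when the XDP core releases a
 * buffer it could not forward: the chunk identified by @handle is
 * placed back at next_to_alloc.
 */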
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct ixgbe_rx_buffer *bi;
	struct ixgbe_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_buffer_info[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
				  struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr)
		return true;

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}
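
/* Refill @cleaned_count Rx descriptors with the supplied allocator:
 * the fast path peeks the fill queue only, while the slow path also
 * consults the umem's reuse queue. Returns false if the ring could not
 * be fully refilled.
 */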
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
				    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
					   u16 count)
{
	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
					   ixgbe_alloc_buffer_zc);
}

static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}
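
/* Zero-copy Rx poll loop: refill descriptors, run XDP on each
 * completed buffer, and construct skbs only for XDP_PASS frames.
 * Returns the full budget on allocation failure so NAPI keeps polling.
 */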
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								  cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}

void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;
	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

	while (i != rx_ring->next_to_alloc) {
		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
		i++;
		bi++;
		if (i == rx_ring->count) {
			i = 0;
			bi = rx_ring->rx_buffer_info;
		}
	}
}
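
/* Transmit up to @budget frames from the umem Tx queue. The tail is
 * bumped once after the loop to limit MMIO writes.
 */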
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->xdpf = NULL;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
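
/* Reclaim completed Tx descriptors: XDP_TX frames are returned via
 * xdp_return_frame(), while AF_XDP frames are counted and completed to
 * the umem in a single batch.
 */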
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}
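
/* ndo_xsk_async_xmit hook: nudge the queue's NAPI context so pending
 * AF_XDP Tx descriptors get processed, rearming the interrupt only if
 * NAPI was not already scheduled.
 */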
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}