drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

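/* Fill TX BDs for one XDP packet: the head buffer goes into the first BD,
 * each frag of a multi-buffer xdp_buff gets its own BD, and the last BD is
 * marked with TX_BD_FLAGS_PACKET_END before the producer index is advanced.
 * Returns the software tx_buf entry for the head BD so the caller can record
 * per-packet state (action, rx_prod, xdp_frame, DMA unmap info).
 */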
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len,
                                   struct xdp_buff *xdp)
{
        struct skb_shared_info *sinfo;
        struct bnxt_sw_tx_bd *tx_buf;
        struct tx_bd *txbd;
        int num_frags = 0;
        u32 flags;
        u16 prod;
        int i;

        if (xdp && xdp_buff_has_frags(xdp)) {
                sinfo = xdp_get_shared_info_from_buff(xdp);
                num_frags = sinfo->nr_frags;
        }

        /* fill up the first buffer */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        tx_buf->nr_frags = num_frags;
        if (xdp)
                tx_buf->page = virt_to_head_page(xdp->data);

        txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
        flags = (len << TX_BD_LEN_SHIFT) |
                ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
                bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        /* now let us fill up the frags into the next buffers */
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &sinfo->frags[i];
                struct bnxt_sw_tx_bd *frag_tx_buf;
                dma_addr_t frag_mapping;
                int frag_len;

                prod = NEXT_TX(prod);
                WRITE_ONCE(txr->tx_prod, prod);

                /* now fill up the frag buffer */
                frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                frag_tx_buf->page = skb_frag_page(frag);

                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

                frag_len = skb_frag_size(frag);
                flags = frag_len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
                frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
                               skb_frag_off(frag);
                txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

                len = frag_len;
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
                        TX_BD_FLAGS_PACKET_END);
        /* Sync TX BD */
        wmb();
        prod = NEXT_TX(prod);
        WRITE_ONCE(txr->tx_prod, prod);

        return tx_buf;
}

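/* Queue an XDP_TX packet.  The RX producer index is stashed in the tx_buf so
 * that bnxt_tx_int_xdp() can ring the RX doorbell once transmission completes
 * and the RX buffer can be handed back to the hardware.
 */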
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                            dma_addr_t mapping, u32 len, u16 rx_prod,
                            struct xdp_buff *xdp)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
        tx_buf->rx_prod = rx_prod;
        tx_buf->action = XDP_TX;
}

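/* Queue an xdp_frame coming from ndo_xdp_xmit (XDP_REDIRECT).  The DMA
 * mapping is recorded in the tx_buf so the buffer can be unmapped and the
 * frame returned when the TX completion is processed.
 */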
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
                                     struct bnxt_tx_ring_info *txr,
                                     dma_addr_t mapping, u32 len,
                                     struct xdp_frame *xdpf)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
        tx_buf->action = XDP_REDIRECT;
        tx_buf->xdpf = xdpf;
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        dma_unmap_len_set(tx_buf, len, 0);
}

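/* TX completion handler for the XDP TX ring.  XDP_REDIRECT entries are DMA
 * unmapped and their xdp_frame returned; XDP_TX entries have their frag pages
 * recycled to the page pool and flag that the RX doorbell must be rung with
 * the stored RX producer.  An unexpected entry schedules a TX ring reset.
 */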
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 tx_hw_cons = txr->tx_hw_cons;
        bool rx_doorbell_needed = false;
        struct bnxt_sw_tx_bd *tx_buf;
        u16 tx_cons = txr->tx_cons;
        u16 last_tx_cons = tx_cons;
        int j, frags;

        if (!budget)
                return;

        while (RING_TX(bp, tx_cons) != tx_hw_cons) {
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];

                if (tx_buf->action == XDP_REDIRECT) {
                        struct pci_dev *pdev = bp->pdev;

                        dma_unmap_single(&pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         dma_unmap_len(tx_buf, len),
                                         DMA_TO_DEVICE);
                        xdp_return_frame(tx_buf->xdpf);
                        tx_buf->action = 0;
                        tx_buf->xdpf = NULL;
                } else if (tx_buf->action == XDP_TX) {
                        tx_buf->action = 0;
                        rx_doorbell_needed = true;
                        last_tx_cons = tx_cons;

                        frags = tx_buf->nr_frags;
                        for (j = 0; j < frags; j++) {
                                tx_cons = NEXT_TX(tx_cons);
                                tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
                                page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
                        }
                } else {
                        bnxt_sched_reset_txr(bp, txr, tx_cons);
                        return;
                }
                tx_cons = NEXT_TX(tx_cons);
        }

        bnapi->events &= ~BNXT_TX_CMP_EVENT;
        WRITE_ONCE(txr->tx_cons, tx_cons);
        if (rx_doorbell_needed) {
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
                bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
        }
}

bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

        return !!xdp_prog;
}

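/* Initialize an xdp_buff around a received buffer: sync the data for CPU
 * access and describe the packet (headroom and length) to the XDP core via
 * xdp_init_buff()/xdp_prepare_buff().
 */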
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                        u16 cons, u8 *data_ptr, unsigned int len,
                        struct xdp_buff *xdp)
{
        u32 buflen = BNXT_RX_PAGE_SIZE;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
        dma_addr_t mapping;
        u32 offset;

        pdev = bp->pdev;
        rx_buf = &rxr->rx_buf_ring[cons];
        offset = bp->rx_offset;

        mapping = rx_buf->mapping - bp->rx_dma_offset;
        dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

        xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
        xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}

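/* Return all frag pages of a multi-buffer xdp_buff to the page pool and
 * reset the frag count, e.g. when the packet is dropped or cannot be
 * transmitted.
 */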
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
                              struct xdp_buff *xdp)
{
        struct skb_shared_info *shinfo;
        int i;

        if (!xdp || !xdp_buff_has_frags(xdp))
                return;
        shinfo = xdp_get_shared_info_from_buff(xdp);
        for (i = 0; i < shinfo->nr_frags; i++) {
                struct page *page = skb_frag_page(&shinfo->frags[i]);

                page_pool_recycle_direct(rxr->page_pool, page);
        }
        shinfo->nr_frags = 0;
}

/* Run the attached XDP program on the received buffer and handle its verdict.
 * Returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
                 unsigned int *len, u8 *event)
{
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
        dma_addr_t mapping;
        u32 tx_needed = 1;
        void *orig_data;
        u32 tx_avail;
        u32 offset;
        u32 act;

        if (!xdp_prog)
                return false;

        pdev = bp->pdev;
        offset = bp->rx_offset;

        txr = rxr->bnapi->tx_ring[0];
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
        orig_data = xdp.data;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        tx_avail = bnxt_tx_avail(bp, txr);
        /* If the tx ring is not full, we must not update the rx producer yet
         * because we may still be transmitting on some BDs.
         */
        if (tx_avail != bp->tx_ring_size)
                *event &= ~BNXT_RX_EVENT;

        *len = xdp.data_end - xdp.data;
        if (orig_data != xdp.data) {
                offset = xdp.data - xdp.data_hard_start;
                *data_ptr = xdp.data_hard_start + offset;
        }

        switch (act) {
        case XDP_PASS:
                return false;

        case XDP_TX:
                rx_buf = &rxr->rx_buf_ring[cons];
                mapping = rx_buf->mapping - bp->rx_dma_offset;
                *event &= BNXT_TX_CMP_EVENT;

                if (unlikely(xdp_buff_has_frags(&xdp))) {
                        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);

                        tx_needed += sinfo->nr_frags;
                        *event = BNXT_AGG_EVENT;
                }

                if (tx_avail < tx_needed) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_xdp_buff_frags_free(rxr, &xdp);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
                                           bp->rx_dir);

                *event |= BNXT_TX_EVENT;
                __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
                                NEXT_RX(rxr->rx_prod), &xdp);
                bnxt_reuse_rx_data(rxr, cons, page);
                return true;
        case XDP_REDIRECT:
                /* if we are calling this here then we know that the
                 * redirect is coming from a frame received by the
                 * bnxt_en driver.
                 */
                rx_buf = &rxr->rx_buf_ring[cons];
                mapping = rx_buf->mapping - bp->rx_dma_offset;
                dma_unmap_page_attrs(&pdev->dev, mapping,
                                     BNXT_RX_PAGE_SIZE, bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);

                /* if we are unable to allocate a new buffer, abort and reuse */
                if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_xdp_buff_frags_free(rxr, &xdp);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        page_pool_recycle_direct(rxr->page_pool, page);
                        return true;
                }

                *event |= BNXT_REDIRECT_EVENT;
                break;
        default:
                bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                bnxt_xdp_buff_frags_free(rxr, &xdp);
                bnxt_reuse_rx_data(rxr, cons, page);
                break;
        }
        return true;
}

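/* ndo_xdp_xmit handler: pick an XDP TX ring based on the current CPU, DMA
 * map and queue up to num_frames frames, and ring the TX doorbell when
 * XDP_XMIT_FLUSH is set.  Returns the number of frames actually queued.
 */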
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                  struct xdp_frame **frames, u32 flags)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        dma_addr_t mapping;
        int nxmit = 0;
        int ring;
        int i;

        if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
            !bp->tx_nr_rings_xdp ||
            !xdp_prog)
                return -EINVAL;

        ring = smp_processor_id() % bp->tx_nr_rings_xdp;
        txr = &bp->tx_ring[ring];

        if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
                return -EINVAL;

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_lock(&txr->xdp_tx_lock);

        for (i = 0; i < num_frames; i++) {
                struct xdp_frame *xdp = frames[i];

                if (!bnxt_tx_avail(bp, txr))
                        break;

                mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
                                         DMA_TO_DEVICE);

                if (dma_mapping_error(&pdev->dev, mapping))
                        break;

                __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
                nxmit++;
        }

        if (flags & XDP_XMIT_FLUSH) {
                /* Sync BD data before updating doorbell */
                wmb();
                bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        }

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_unlock(&txr->xdp_tx_lock);

        return nxmit;
}

/* Install or remove an XDP program.  Called under rtnl_lock: checks the MTU
 * against the page-mode limit for programs without frag support, requires
 * combined rx/tx channels, reserves dedicated XDP TX rings, swaps the
 * program, and restarts the NIC if it is running so the new ring layout
 * takes effect.
 */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
        struct net_device *dev = bp->dev;
        int tx_xdp = 0, tx_cp, rc, tc;
        struct bpf_prog *old;

        if (prog && !prog->aux->xdp_has_frags &&
            bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
                netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
                            bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
                return -EOPNOTSUPP;
        }
        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
                netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
                return -EOPNOTSUPP;
        }
        if (prog)
                tx_xdp = bp->rx_nr_rings;

        tc = netdev_get_num_tc(dev);
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
                              true, tc, tx_xdp);
        if (rc) {
                netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
                return rc;
        }
        if (netif_running(dev))
                bnxt_close_nic(bp, true, false);

        old = xchg(&bp->xdp_prog, prog);
        if (old)
                bpf_prog_put(old);

        if (prog) {
                bnxt_set_rx_skb_mode(bp, true);
                xdp_features_set_redirect_target(dev, true);
        } else {
                int rx, tx;

                xdp_features_clear_redirect_target(dev);
                bnxt_set_rx_skb_mode(bp, false);
                bnxt_get_max_rings(bp, &rx, &tx, true);
                if (rx > 1) {
                        bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
                        bp->dev->hw_features |= NETIF_F_LRO;
                }
        }
        bp->tx_nr_rings_xdp = tx_xdp;
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
        bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);

        if (netif_running(dev))
                return bnxt_open_nic(bp, true, false);

        return 0;
}

int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                rc = bnxt_xdp_set(bp, xdp->prog);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        return rc;
}

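/* Finish building an skb from a multi-buffer xdp_buff: set the checksum
 * state from the RX completion record and attach the frag pages via
 * xdp_update_skb_shared_info().
 */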
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
                   struct page_pool *pool, struct xdp_buff *xdp,
                   struct rx_cmp_ext *rxcmp1)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

        if (!skb)
                return NULL;
        skb_checksum_none_assert(skb);
        if (RX_CMP_L4_CS_OK(rxcmp1)) {
                if (bp->dev->features & NETIF_F_RXCSUM) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
                }
        }
        xdp_update_skb_shared_info(skb, num_frags,
                                   sinfo->xdp_frags_size,
                                   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
                                   xdp_buff_is_frag_pfmemalloc(xdp));
        return skb;
}