1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
5 #include <linux/bpf_trace.h>
8 #include <linux/vmalloc.h>
9 #include <linux/ptp_classify.h>
10 #include <net/ip6_checksum.h>
11 #include <net/pkt_sched.h>
14 u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
16 return enetc_port_rd(&si->hw, reg);
18 EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
20 void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
22 enetc_port_wr(&si->hw, reg, val);
23 if (si->hw_features & ENETC_SI_F_QBU)
24 enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
26 EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
28 static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
31 priv->preemptible_tcs = preemptible_tcs;
32 enetc_mm_commit_preemptible_tcs(priv);
35 static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
37 int num_tx_rings = priv->num_tx_rings;
40 return num_tx_rings - num_possible_cpus();
45 static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
46 struct enetc_bdr *tx_ring)
48 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
50 return priv->rx_ring[index];
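/* Illustrative note (assumption, not shown in this listing): xdp_tx_ring
 * is expected to point at the first TX ring reserved for XDP, so the
 * pointer subtraction above yields a 0-based per-CPU channel index that
 * pairs 1:1 with an RX ring, e.g. with 2 stack queues and 2 XDP queues:
 *
 *   tx_ring[2] -> index 0 -> rx_ring[0]
 *   tx_ring[3] -> index 1 -> rx_ring[1]
 */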
53 static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
55 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
61 static struct xdp_frame *
62 enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
64 if (tx_swbd->is_xdp_redirect)
65 return tx_swbd->xdp_frame;
70 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
71 struct enetc_tx_swbd *tx_swbd)
73 /* For XDP_TX, pages come from RX, whereas for the other contexts where
74 * we have is_dma_page_set, those come from skb_frag_dma_map. We must
75 * match the original DMA mapping length, so differentiate the two cases.
77 if (tx_swbd->is_dma_page)
78 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
79 tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
82 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
83 tx_swbd->len, tx_swbd->dir);
87 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
88 struct enetc_tx_swbd *tx_swbd)
90 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
91 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
94 enetc_unmap_tx_buff(tx_ring, tx_swbd);
97 xdp_return_frame(tx_swbd->xdp_frame);
98 tx_swbd->xdp_frame = NULL;
100 dev_kfree_skb_any(skb);
105 /* Let H/W know BD ring has been updated */
106 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
109 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
112 static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
113 u8 *msgtype, u8 *twostep,
114 u16 *correction_offset, u16 *body_offset)
116 unsigned int ptp_class;
117 struct ptp_header *hdr;
121 ptp_class = ptp_classify_raw(skb);
122 if (ptp_class == PTP_CLASS_NONE)
125 hdr = ptp_parse_header(skb, ptp_class);
129 type = ptp_class & PTP_CLASS_PMASK;
130 if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
135 *msgtype = ptp_get_msgtype(hdr, ptp_class);
136 *twostep = hdr->flag_field[0] & 0x2;
138 base = skb_mac_header(skb);
139 *correction_offset = (u8 *)&hdr->correction - base;
140 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
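/* Worked example, assuming an untagged L2 PTP frame: struct ptp_header
 * places the correction field at byte 8 and is 34 bytes in total, so
 * with a 14-byte Ethernet header *correction_offset = 14 + 8 = 22 and
 * *body_offset = 14 + 34 = 48 (start of originTimestamp in a Sync frame).
 */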
145 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
147 bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
148 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
149 struct enetc_hw *hw = &priv->si->hw;
150 struct enetc_tx_swbd *tx_swbd;
151 int len = skb_headlen(skb);
152 union enetc_tx_bd temp_bd;
153 u8 msgtype, twostep, udp;
154 union enetc_tx_bd *txbd;
155 u16 offset1, offset2;
162 i = tx_ring->next_to_use;
163 txbd = ENETC_TXBD(*tx_ring, i);
166 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
167 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
170 temp_bd.addr = cpu_to_le64(dma);
171 temp_bd.buf_len = cpu_to_le16(len);
174 tx_swbd = &tx_ring->tx_swbd[i];
177 tx_swbd->is_dma_page = 0;
178 tx_swbd->dir = DMA_TO_DEVICE;
181 do_vlan = skb_vlan_tag_present(skb);
182 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
183 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
185 msgtype != PTP_MSGTYPE_SYNC || twostep)
186 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
188 do_onestep_tstamp = true;
189 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
190 do_twostep_tstamp = true;
193 tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
194 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
195 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
197 if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
198 flags |= ENETC_TXBD_FLAGS_EX;
200 if (tx_ring->tsd_enable)
201 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
203 /* first BD needs frm_len and offload flags set */
204 temp_bd.frm_len = cpu_to_le16(skb->len);
205 temp_bd.flags = flags;
207 if (flags & ENETC_TXBD_FLAGS_TSE)
208 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
211 if (flags & ENETC_TXBD_FLAGS_EX) {
214 enetc_clear_tx_bd(&temp_bd);
216 /* add extension BD for VLAN and/or timestamping */
221 if (unlikely(i == tx_ring->bd_count)) {
223 tx_swbd = tx_ring->tx_swbd;
224 txbd = ENETC_TXBD(*tx_ring, 0);
229 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
230 temp_bd.ext.tpid = 0; /* < C-TAG */
231 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
234 if (do_onestep_tstamp) {
239 lo = enetc_rd_hot(hw, ENETC_SICTR0);
240 hi = enetc_rd_hot(hw, ENETC_SICTR1);
241 sec = (u64)hi << 32 | lo;
242 nsec = do_div(sec, 1000000000);
244 /* Configure extension BD */
245 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
246 e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
248 /* Update originTimestamp field of Sync packet
249 * - 48-bit seconds field
250 * - 32-bit nanoseconds field
252 data = skb_mac_header(skb);
253 *(__be16 *)(data + offset2) =
254 htons((sec >> 32) & 0xffff);
255 *(__be32 *)(data + offset2 + 2) =
256 htonl(sec & 0xffffffff);
257 *(__be32 *)(data + offset2 + 6) = htonl(nsec);
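/* Sketch of the arithmetic above: do_div() divides 'sec' in place and
 * returns the remainder, so sec becomes counter / NSEC_PER_SEC and nsec
 * the sub-second part. The 48-bit seconds value is then stored
 * big-endian as a 16-bit high half followed by a 32-bit low half, with
 * the 32-bit nanoseconds word right after, matching the PTP
 * originTimestamp layout.
 */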
259 /* Configure single-step register */
260 val = ENETC_PM0_SINGLE_STEP_EN;
261 val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
263 val |= ENETC_PM0_SINGLE_STEP_CH;
265 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
267 } else if (do_twostep_tstamp) {
268 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
269 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
272 temp_bd.ext.e_flags = e_flags;
276 frag = &skb_shinfo(skb)->frags[0];
277 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
278 len = skb_frag_size(frag);
279 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
281 if (dma_mapping_error(tx_ring->dev, dma))
285 enetc_clear_tx_bd(&temp_bd);
291 if (unlikely(i == tx_ring->bd_count)) {
293 tx_swbd = tx_ring->tx_swbd;
294 txbd = ENETC_TXBD(*tx_ring, 0);
298 temp_bd.addr = cpu_to_le64(dma);
299 temp_bd.buf_len = cpu_to_le16(len);
303 tx_swbd->is_dma_page = 1;
304 tx_swbd->dir = DMA_TO_DEVICE;
308 /* last BD needs 'F' bit set */
309 flags |= ENETC_TXBD_FLAGS_F;
310 temp_bd.flags = flags;
313 tx_ring->tx_swbd[i].is_eof = true;
314 tx_ring->tx_swbd[i].skb = skb;
316 enetc_bdr_idx_inc(tx_ring, &i);
317 tx_ring->next_to_use = i;
319 skb_tx_timestamp(skb);
321 enetc_update_tx_ring_tail(tx_ring);
326 dev_err(tx_ring->dev, "DMA map error");
329 tx_swbd = &tx_ring->tx_swbd[i];
330 enetc_free_tx_frame(tx_ring, tx_swbd);
332 i = tx_ring->bd_count;
339 static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
340 struct enetc_tx_swbd *tx_swbd,
341 union enetc_tx_bd *txbd, int *i, int hdr_len,
344 union enetc_tx_bd txbd_tmp;
345 u8 flags = 0, e_flags = 0;
348 enetc_clear_tx_bd(&txbd_tmp);
349 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
351 if (skb_vlan_tag_present(skb))
352 flags |= ENETC_TXBD_FLAGS_EX;
354 txbd_tmp.addr = cpu_to_le64(addr);
355 txbd_tmp.buf_len = cpu_to_le16(hdr_len);
357 /* first BD needs frm_len and offload flags set */
358 txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
359 txbd_tmp.flags = flags;
361 /* For the TSO header we do not set the dma address since we do not
362 * want it unmapped when we do cleanup. We still set len so that we
363 * count the bytes sent.
365 tx_swbd->len = hdr_len;
366 tx_swbd->do_twostep_tstamp = false;
367 tx_swbd->check_wb = false;
369 /* Actually write the header in the BD */
372 /* Add extension BD for VLAN */
373 if (flags & ENETC_TXBD_FLAGS_EX) {
374 /* Get the next BD */
375 enetc_bdr_idx_inc(tx_ring, i);
376 txbd = ENETC_TXBD(*tx_ring, *i);
377 tx_swbd = &tx_ring->tx_swbd[*i];
380 /* Setup the VLAN fields */
381 enetc_clear_tx_bd(&txbd_tmp);
382 txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
383 txbd_tmp.ext.tpid = 0; /* < C-TAG */
384 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
387 txbd_tmp.ext.e_flags = e_flags;
392 static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
393 struct enetc_tx_swbd *tx_swbd,
394 union enetc_tx_bd *txbd, char *data,
395 int size, bool last_bd)
397 union enetc_tx_bd txbd_tmp;
401 enetc_clear_tx_bd(&txbd_tmp);
403 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
404 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
405 netdev_err(tx_ring->ndev, "DMA map error\n");
410 flags |= ENETC_TXBD_FLAGS_F;
414 txbd_tmp.addr = cpu_to_le64(addr);
415 txbd_tmp.buf_len = cpu_to_le16(size);
416 txbd_tmp.flags = flags;
420 tx_swbd->dir = DMA_TO_DEVICE;
427 static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
428 char *hdr, int hdr_len, int *l4_hdr_len)
430 char *l4_hdr = hdr + skb_transport_offset(skb);
431 int mac_hdr_len = skb_network_offset(skb);
433 if (tso->tlen != sizeof(struct udphdr)) {
434 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
438 struct udphdr *udph = (struct udphdr *)(l4_hdr);
443 /* Compute the IP checksum. This is necessary since tso_build_hdr()
444 * already incremented the IP ID field.
447 struct iphdr *iph = (void *)(hdr + mac_hdr_len);
450 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
453 /* Compute the checksum over the L4 header. */
454 *l4_hdr_len = hdr_len - skb_transport_offset(skb);
455 return csum_partial(l4_hdr, *l4_hdr_len, 0);
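/* Minimal sketch of the incremental checksum scheme (seg1/seg2 are
 * hypothetical payload buffers; csum_partial()/csum_block_add() are the
 * generic kernel helpers the caller uses):
 *
 *   __wsum sum = csum_partial(l4_hdr, l4_hdr_len, 0);
 *   sum = csum_block_add(sum, csum_partial(seg1, len1, 0), l4_hdr_len);
 *   sum = csum_block_add(sum, csum_partial(seg2, len2, 0),
 *                        l4_hdr_len + len1);
 *   // enetc_tso_complete_csum() later folds in the pseudo-header
 */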
458 static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
459 struct sk_buff *skb, char *hdr, int len,
462 char *l4_hdr = hdr + skb_transport_offset(skb);
465 /* Complete the L4 checksum by appending the pseudo-header to the
466 * already computed checksum.
469 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
471 len, ip_hdr(skb)->protocol, sum);
473 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
474 &ipv6_hdr(skb)->daddr,
475 len, ipv6_hdr(skb)->nexthdr, sum);
477 if (tso->tlen != sizeof(struct udphdr)) {
478 struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
480 tcph->check = csum_final;
482 struct udphdr *udph = (struct udphdr *)(l4_hdr);
484 udph->check = csum_final;
488 static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
490 int hdr_len, total_len, data_len;
491 struct enetc_tx_swbd *tx_swbd;
492 union enetc_tx_bd *txbd;
496 int err, i, bd_data_num;
498 /* Initialize the TSO handler, and prepare the first payload */
499 hdr_len = tso_start(skb, &tso);
500 total_len = skb->len - hdr_len;
501 i = tx_ring->next_to_use;
503 while (total_len > 0) {
507 txbd = ENETC_TXBD(*tx_ring, i);
508 tx_swbd = &tx_ring->tx_swbd[i];
511 /* Determine the length of this packet */
512 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
513 total_len -= data_len;
515 /* prepare packet headers: MAC + IP + TCP */
516 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
517 tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
519 /* compute the csum over the L4 header */
520 csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
521 enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
525 while (data_len > 0) {
528 size = min_t(int, tso.size, data_len);
530 /* Advance the index in the BDR */
531 enetc_bdr_idx_inc(tx_ring, &i);
532 txbd = ENETC_TXBD(*tx_ring, i);
533 tx_swbd = &tx_ring->tx_swbd[i];
536 /* Compute the checksum over this segment of data and
537 * add it to the csum already computed (over the L4
538 * header and possible other data segments).
540 csum2 = csum_partial(tso.data, size, 0);
541 csum = csum_block_add(csum, csum2, pos);
544 err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
553 tso_build_data(skb, &tso, size);
555 if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
559 enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
564 /* Go to the next BD */
565 enetc_bdr_idx_inc(tx_ring, &i);
568 tx_ring->next_to_use = i;
569 enetc_update_tx_ring_tail(tx_ring);
574 dev_err(tx_ring->dev, "DMA map error");
578 tx_swbd = &tx_ring->tx_swbd[i];
579 enetc_free_tx_frame(tx_ring, tx_swbd);
581 i = tx_ring->bd_count;
588 static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
589 struct net_device *ndev)
591 struct enetc_ndev_priv *priv = netdev_priv(ndev);
592 struct enetc_bdr *tx_ring;
595 /* Queue one-step Sync packet if already locked */
596 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
597 if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
599 skb_queue_tail(&priv->tx_skbs, skb);
604 tx_ring = priv->tx_ring[skb->queue_mapping];
606 if (skb_is_gso(skb)) {
607 if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
608 netif_stop_subqueue(ndev, tx_ring->index);
609 return NETDEV_TX_BUSY;
613 count = enetc_map_tx_tso_buffs(tx_ring, skb);
616 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
617 if (unlikely(skb_linearize(skb)))
618 goto drop_packet_err;
620 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
621 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
622 netif_stop_subqueue(ndev, tx_ring->index);
623 return NETDEV_TX_BUSY;
626 if (skb->ip_summed == CHECKSUM_PARTIAL) {
627 err = skb_checksum_help(skb);
629 goto drop_packet_err;
632 count = enetc_map_tx_buffs(tx_ring, skb);
636 if (unlikely(!count))
637 goto drop_packet_err;
639 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
640 netif_stop_subqueue(ndev, tx_ring->index);
645 dev_kfree_skb_any(skb);
649 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
651 struct enetc_ndev_priv *priv = netdev_priv(ndev);
652 u8 udp, msgtype, twostep;
653 u16 offset1, offset2;
655 /* Mark the Tx timestamp type in skb->cb[0] if required */
656 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
657 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
658 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
663 /* Fall back to two-step timestamp if not one-step Sync packet */
664 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
665 if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
666 &offset1, &offset2) ||
667 msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
668 skb->cb[0] = ENETC_F_TX_TSTAMP;
671 return enetc_start_xmit(skb, ndev);
673 EXPORT_SYMBOL_GPL(enetc_xmit);
675 static irqreturn_t enetc_msix(int irq, void *data)
677 struct enetc_int_vector *v = data;
682 /* disable interrupts */
683 enetc_wr_reg_hot(v->rbier, 0);
684 enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
686 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
687 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
691 napi_schedule(&v->napi);
696 static void enetc_rx_dim_work(struct work_struct *w)
698 struct dim *dim = container_of(w, struct dim, work);
699 struct dim_cq_moder moder =
700 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
701 struct enetc_int_vector *v =
702 container_of(dim, struct enetc_int_vector, rx_dim);
704 v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
705 dim->state = DIM_START_MEASURE;
708 static void enetc_rx_net_dim(struct enetc_int_vector *v)
710 struct dim_sample dim_sample = {};
714 if (!v->rx_napi_work)
717 dim_update_sample(v->comp_cnt,
718 v->rx_ring.stats.packets,
719 v->rx_ring.stats.bytes,
721 net_dim(&v->rx_dim, dim_sample);
724 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
726 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
728 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
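/* e.g. bd_count = 256, pi = 10, ci = 250: 256 - 250 + 10 = 16 BDs are
 * ready, i.e. the distance from the driver's clean index to the
 * hardware consumer index, modulo the ring size.
 */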
731 static bool enetc_page_reusable(struct page *page)
733 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
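/* Recycling is safe only while the driver holds the sole page reference
 * (page_ref_count == 1, i.e. the stack has released its half) and the
 * page was not allocated from the emergency pfmemalloc reserves, which
 * must be returned to the allocator as soon as possible.
 */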
736 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
737 struct enetc_rx_swbd *old)
739 struct enetc_rx_swbd *new;
741 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
743 /* next buf that may reuse a page */
744 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
746 /* copy page reference */
750 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
753 u32 lo, hi, tstamp_lo;
755 lo = enetc_rd_hot(hw, ENETC_SICTR0);
756 hi = enetc_rd_hot(hw, ENETC_SICTR1);
757 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
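/* (Assumed context from the full driver: hi is decremented when
 * lo <= tstamp_lo, guarding against the 32-bit low word wrapping
 * between the BD write-back and the SICTR0 read, so the spliced 64-bit
 * value below stays monotonic.)
 */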
760 *tstamp = (u64)hi << 32 | tstamp_lo;
763 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
765 struct skb_shared_hwtstamps shhwtstamps;
767 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
768 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
769 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
770 skb_txtime_consumed(skb);
771 skb_tstamp_tx(skb, &shhwtstamps);
775 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
776 struct enetc_tx_swbd *tx_swbd)
778 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
779 struct enetc_rx_swbd rx_swbd = {
781 .page = tx_swbd->page,
782 .page_offset = tx_swbd->page_offset,
786 struct enetc_bdr *rx_ring;
788 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
790 if (likely(enetc_swbd_unused(rx_ring))) {
791 enetc_reuse_page(rx_ring, &rx_swbd);
793 /* sync for use by the device */
794 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
796 ENETC_RXB_DMA_SIZE_XDP,
799 rx_ring->stats.recycles++;
801 /* RX ring is already full, we need to unmap and free the
802 * page, since there's nothing useful we can do with it.
804 rx_ring->stats.recycle_failures++;
806 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
808 __free_page(rx_swbd.page);
811 rx_ring->xdp.xdp_tx_in_flight--;
814 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
816 int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
817 struct net_device *ndev = tx_ring->ndev;
818 struct enetc_ndev_priv *priv = netdev_priv(ndev);
819 struct enetc_tx_swbd *tx_swbd;
821 bool do_twostep_tstamp;
824 i = tx_ring->next_to_clean;
825 tx_swbd = &tx_ring->tx_swbd[i];
827 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
829 do_twostep_tstamp = false;
831 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
832 struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
833 struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
834 bool is_eof = tx_swbd->is_eof;
836 if (unlikely(tx_swbd->check_wb)) {
837 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
839 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
840 tx_swbd->do_twostep_tstamp) {
841 enetc_get_tx_tstamp(&priv->si->hw, txbd,
843 do_twostep_tstamp = true;
846 if (tx_swbd->qbv_en &&
847 txbd->wb.status & ENETC_TXBD_STATS_WIN)
851 if (tx_swbd->is_xdp_tx)
852 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
853 else if (likely(tx_swbd->dma))
854 enetc_unmap_tx_buff(tx_ring, tx_swbd);
857 xdp_return_frame(xdp_frame);
859 if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
860 /* Start work to release the lock for the next one-step
861 * timestamping packet, and send one skb from the
862 * tx_skbs queue if any is pending.
864 schedule_work(&priv->tx_onestep_tstamp);
865 } else if (unlikely(do_twostep_tstamp)) {
866 enetc_tstamp_tx(skb, tstamp);
867 do_twostep_tstamp = false;
869 napi_consume_skb(skb, napi_budget);
872 tx_byte_cnt += tx_swbd->len;
873 /* Scrub the swbd here so we don't have to do that
874 * when we reuse it during xmit
876 memset(tx_swbd, 0, sizeof(*tx_swbd));
881 if (unlikely(i == tx_ring->bd_count)) {
883 tx_swbd = tx_ring->tx_swbd;
886 /* BD iteration loop end */
889 /* re-arm interrupt source */
890 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
891 BIT(16 + tx_ring->index));
894 if (unlikely(!bds_to_clean))
895 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
898 tx_ring->next_to_clean = i;
899 tx_ring->stats.packets += tx_frm_cnt;
900 tx_ring->stats.bytes += tx_byte_cnt;
901 tx_ring->stats.win_drop += tx_win_drop;
903 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
904 __netif_subqueue_stopped(ndev, tx_ring->index) &&
905 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
906 netif_wake_subqueue(ndev, tx_ring->index);
909 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
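/* Returning true reports to enetc_poll() that the ring was fully
 * cleaned within the ENETC_DEFAULT_TX_WORK allotment; false keeps the
 * NAPI instance scheduled instead of re-enabling interrupts.
 */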
912 static bool enetc_new_page(struct enetc_bdr *rx_ring,
913 struct enetc_rx_swbd *rx_swbd)
915 bool xdp = !!(rx_ring->xdp.prog);
919 page = dev_alloc_page();
923 /* For XDP_TX, we forgo dma_unmap -> dma_map */
924 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
926 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
927 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
934 rx_swbd->page = page;
935 rx_swbd->page_offset = rx_ring->buffer_offset;
940 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
942 struct enetc_rx_swbd *rx_swbd;
943 union enetc_rx_bd *rxbd;
946 i = rx_ring->next_to_use;
947 rx_swbd = &rx_ring->rx_swbd[i];
948 rxbd = enetc_rxbd(rx_ring, i);
950 for (j = 0; j < buff_cnt; j++) {
952 if (unlikely(!rx_swbd->page)) {
953 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
954 rx_ring->stats.rx_alloc_errs++;
960 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
961 rx_swbd->page_offset);
962 /* clear 'R' as well */
965 enetc_rxbd_next(rx_ring, &rxbd, &i);
966 rx_swbd = &rx_ring->rx_swbd[i];
970 rx_ring->next_to_alloc = i; /* keep track for page reuse */
971 rx_ring->next_to_use = i;
973 /* update ENETC's consumer index */
974 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
980 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
981 static void enetc_get_rx_tstamp(struct net_device *ndev,
982 union enetc_rx_bd *rxbd,
985 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
986 struct enetc_ndev_priv *priv = netdev_priv(ndev);
987 struct enetc_hw *hw = &priv->si->hw;
988 u32 lo, hi, tstamp_lo;
991 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
992 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
993 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
994 rxbd = enetc_rxbd_ext(rxbd);
995 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
999 tstamp = (u64)hi << 32 | tstamp_lo;
1000 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1001 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
1006 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
1007 union enetc_rx_bd *rxbd, struct sk_buff *skb)
1009 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1012 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
1013 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
1015 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
1016 skb->ip_summed = CHECKSUM_COMPLETE;
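/* CHECKSUM_COMPLETE: skb->csum must hold the one's complement sum over
 * the whole packet, so the BD's 16-bit checksum value is byte-swapped,
 * inverted and unfolded into the 32-bit __wsum format the stack expects.
 */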
1019 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
1022 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
1024 tpid = htons(ETH_P_8021Q);
1027 tpid = htons(ETH_P_8021AD);
1030 tpid = htons(enetc_port_rd(&priv->si->hw,
1034 tpid = htons(enetc_port_rd(&priv->si->hw,
1041 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
1044 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1045 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
1046 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
1050 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
1051 * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
1054 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
1057 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
1059 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
1060 rx_swbd->page_offset,
1061 size, rx_swbd->dir);
1065 /* Reuse the current page without performing half-page buffer flipping */
1066 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
1067 struct enetc_rx_swbd *rx_swbd)
1069 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
1071 enetc_reuse_page(rx_ring, rx_swbd);
1073 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
1074 rx_swbd->page_offset,
1075 buffer_size, rx_swbd->dir);
1077 rx_swbd->page = NULL;
1080 /* Reuse the current page by performing half-page buffer flipping */
1081 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
1082 struct enetc_rx_swbd *rx_swbd)
1084 if (likely(enetc_page_reusable(rx_swbd->page))) {
1085 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
1086 page_ref_inc(rx_swbd->page);
1088 enetc_put_rx_buff(rx_ring, rx_swbd);
1090 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
1092 rx_swbd->page = NULL;
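/* Flipping sketch, assuming 4K pages: each buffer owns half a page
 * (ENETC_RXB_TRUESIZE bytes), so the XOR above toggles between the two
 * halves. One half can be posted back to hardware while the stack still
 * reads the other, with page_ref_inc() keeping the page alive for both.
 */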
1096 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
1099 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1100 struct sk_buff *skb;
1103 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
1104 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
1105 if (unlikely(!skb)) {
1106 rx_ring->stats.rx_alloc_errs++;
1110 skb_reserve(skb, rx_ring->buffer_offset);
1111 __skb_put(skb, size);
1113 enetc_flip_rx_buff(rx_ring, rx_swbd);
1118 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
1119 u16 size, struct sk_buff *skb)
1121 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1123 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
1124 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
1126 enetc_flip_rx_buff(rx_ring, rx_swbd);
1129 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
1131 union enetc_rx_bd **rxbd, int *i)
1133 if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
1136 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1137 enetc_rxbd_next(rx_ring, rxbd, i);
1139 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1141 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1143 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
1144 enetc_rxbd_next(rx_ring, rxbd, i);
1147 rx_ring->ndev->stats.rx_dropped++;
1148 rx_ring->ndev->stats.rx_errors++;
1153 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
1154 u32 bd_status, union enetc_rx_bd **rxbd,
1155 int *i, int *cleaned_cnt, int buffer_size)
1157 struct sk_buff *skb;
1160 size = le16_to_cpu((*rxbd)->r.buf_len);
1161 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
1165 enetc_get_offloads(rx_ring, *rxbd, skb);
1169 enetc_rxbd_next(rx_ring, rxbd, i);
1171 /* not last BD in frame? */
1172 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1173 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1176 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1178 size = le16_to_cpu((*rxbd)->r.buf_len);
1181 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
1185 enetc_rxbd_next(rx_ring, rxbd, i);
1188 skb_record_rx_queue(skb, rx_ring->index);
1189 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
1194 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
1196 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
1197 struct napi_struct *napi, int work_limit)
1199 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1202 cleaned_cnt = enetc_bd_unused(rx_ring);
1203 /* next descriptor to process */
1204 i = rx_ring->next_to_clean;
1206 while (likely(rx_frm_cnt < work_limit)) {
1207 union enetc_rx_bd *rxbd;
1208 struct sk_buff *skb;
1211 if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
1212 cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
1215 rxbd = enetc_rxbd(rx_ring, i);
1216 bd_status = le32_to_cpu(rxbd->r.lstatus);
1220 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1221 dma_rmb(); /* for reading other rxbd fields */
1223 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1227 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
1228 &cleaned_cnt, ENETC_RXB_DMA_SIZE);
1232 /* When set, the outer VLAN header is extracted and reported
1233 * in the receive buffer descriptor, so rx_byte_cnt must also
1234 * account for the length of the extracted VLAN header.
1236 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1237 rx_byte_cnt += VLAN_HLEN;
1238 rx_byte_cnt += skb->len + ETH_HLEN;
1241 napi_gro_receive(napi, skb);
1244 rx_ring->next_to_clean = i;
1246 rx_ring->stats.packets += rx_frm_cnt;
1247 rx_ring->stats.bytes += rx_byte_cnt;
1252 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
1253 struct enetc_tx_swbd *tx_swbd,
1256 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1260 enetc_clear_tx_bd(txbd);
1261 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
1262 txbd->buf_len = cpu_to_le16(tx_swbd->len);
1263 txbd->frm_len = cpu_to_le16(frm_len);
1265 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
1268 /* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer descriptors.
1271 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
1272 struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
1274 struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
1275 int i, k, frm_len = tmp_tx_swbd->len;
1277 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
1280 while (unlikely(!tmp_tx_swbd->is_eof)) {
1282 frm_len += tmp_tx_swbd->len;
1285 i = tx_ring->next_to_use;
1287 for (k = 0; k < num_tx_swbd; k++) {
1288 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
1290 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
1292 /* last BD needs 'F' bit set */
1293 if (xdp_tx_swbd->is_eof) {
1294 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
1296 txbd->flags = ENETC_TXBD_FLAGS_F;
1299 enetc_bdr_idx_inc(tx_ring, &i);
1302 tx_ring->next_to_use = i;
1307 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
1308 struct enetc_tx_swbd *xdp_tx_arr,
1309 struct xdp_frame *xdp_frame)
1311 struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
1312 struct skb_shared_info *shinfo;
1313 void *data = xdp_frame->data;
1314 int len = xdp_frame->len;
1320 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1321 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1322 netdev_err(tx_ring->ndev, "DMA map error\n");
1326 xdp_tx_swbd->dma = dma;
1327 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1328 xdp_tx_swbd->len = len;
1329 xdp_tx_swbd->is_xdp_redirect = true;
1330 xdp_tx_swbd->is_eof = false;
1331 xdp_tx_swbd->xdp_frame = NULL;
1335 if (!xdp_frame_has_frags(xdp_frame))
1338 xdp_tx_swbd = &xdp_tx_arr[n];
1340 shinfo = xdp_get_shared_info_from_frame(xdp_frame);
1342 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
1344 data = skb_frag_address(frag);
1345 len = skb_frag_size(frag);
1347 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
1348 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
1349 /* Undo the DMA mapping for all fragments */
1351 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
1353 netdev_err(tx_ring->ndev, "DMA map error\n");
1357 xdp_tx_swbd->dma = dma;
1358 xdp_tx_swbd->dir = DMA_TO_DEVICE;
1359 xdp_tx_swbd->len = len;
1360 xdp_tx_swbd->is_xdp_redirect = true;
1361 xdp_tx_swbd->is_eof = false;
1362 xdp_tx_swbd->xdp_frame = NULL;
1365 xdp_tx_swbd = &xdp_tx_arr[n];
1368 xdp_tx_arr[n - 1].is_eof = true;
1369 xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
1374 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
1375 struct xdp_frame **frames, u32 flags)
1377 struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
1378 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1379 struct enetc_bdr *tx_ring;
1380 int xdp_tx_bd_cnt, i, k;
1381 int xdp_tx_frm_cnt = 0;
1385 tx_ring = priv->xdp_tx_ring[smp_processor_id()];
1387 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
1389 for (k = 0; k < num_frames; k++) {
1390 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
1393 if (unlikely(xdp_tx_bd_cnt < 0))
1396 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
1398 for (i = 0; i < xdp_tx_bd_cnt; i++)
1399 enetc_unmap_tx_buff(tx_ring,
1400 &xdp_redirect_arr[i]);
1401 tx_ring->stats.xdp_tx_drops++;
1408 if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
1409 enetc_update_tx_ring_tail(tx_ring);
1411 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
1413 enetc_unlock_mdio();
1415 return xdp_tx_frm_cnt;
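/* ndo_xdp_xmit contract (as assumed here): return how many frames were
 * accepted for transmission; the XDP core frees the rest. The tail
 * pointer write is batched and only issued on XDP_XMIT_FLUSH or on a
 * partial failure.
 */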
1417 EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
1419 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1420 struct xdp_buff *xdp_buff, u16 size)
1422 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1423 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
1425 /* To be used for XDP_TX */
1426 rx_swbd->len = size;
1428 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
1429 rx_ring->buffer_offset, size, false);
1432 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
1433 u16 size, struct xdp_buff *xdp_buff)
1435 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
1436 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
1439 /* To be used for XDP_TX */
1440 rx_swbd->len = size;
1442 if (!xdp_buff_has_frags(xdp_buff)) {
1443 xdp_buff_set_frags_flag(xdp_buff);
1444 shinfo->xdp_frags_size = size;
1445 shinfo->nr_frags = 0;
1447 shinfo->xdp_frags_size += size;
1450 if (page_is_pfmemalloc(rx_swbd->page))
1451 xdp_buff_set_frag_pfmemalloc(xdp_buff);
1453 frag = &shinfo->frags[shinfo->nr_frags];
1454 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
1460 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
1461 union enetc_rx_bd **rxbd, int *i,
1462 int *cleaned_cnt, struct xdp_buff *xdp_buff)
1464 u16 size = le16_to_cpu((*rxbd)->r.buf_len);
1466 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
1468 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
1470 enetc_rxbd_next(rx_ring, rxbd, i);
1472 /* not last BD in frame? */
1473 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
1474 bd_status = le32_to_cpu((*rxbd)->r.lstatus);
1475 size = ENETC_RXB_DMA_SIZE_XDP;
1477 if (bd_status & ENETC_RXBD_LSTATUS_F) {
1479 size = le16_to_cpu((*rxbd)->r.buf_len);
1482 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
1484 enetc_rxbd_next(rx_ring, rxbd, i);
1488 /* Convert RX buffer descriptors to TX buffer descriptors. These will be
1489 * recycled back into the RX ring in enetc_clean_tx_ring.
1491 static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
1492 struct enetc_bdr *rx_ring,
1493 int rx_ring_first, int rx_ring_last)
1497 for (; rx_ring_first != rx_ring_last;
1498 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
1499 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
1500 struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
1502 /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
1503 tx_swbd->dma = rx_swbd->dma;
1504 tx_swbd->dir = rx_swbd->dir;
1505 tx_swbd->page = rx_swbd->page;
1506 tx_swbd->page_offset = rx_swbd->page_offset;
1507 tx_swbd->len = rx_swbd->len;
1508 tx_swbd->is_dma_page = true;
1509 tx_swbd->is_xdp_tx = true;
1510 tx_swbd->is_eof = false;
1513 /* We rely on the caller providing an rx_ring_last > rx_ring_first */
1514 xdp_tx_arr[n - 1].is_eof = true;
1519 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
1522 while (rx_ring_first != rx_ring_last) {
1523 enetc_put_rx_buff(rx_ring,
1524 &rx_ring->rx_swbd[rx_ring_first]);
1525 enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
1527 rx_ring->stats.xdp_drops++;
1530 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
1531 struct napi_struct *napi, int work_limit,
1532 struct bpf_prog *prog)
1534 int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
1535 struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
1536 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
1537 int rx_frm_cnt = 0, rx_byte_cnt = 0;
1538 struct enetc_bdr *tx_ring;
1542 cleaned_cnt = enetc_bd_unused(rx_ring);
1543 /* next descriptor to process */
1544 i = rx_ring->next_to_clean;
1546 while (likely(rx_frm_cnt < work_limit)) {
1547 union enetc_rx_bd *rxbd, *orig_rxbd;
1548 int orig_i, orig_cleaned_cnt;
1549 struct xdp_buff xdp_buff;
1550 struct sk_buff *skb;
1554 rxbd = enetc_rxbd(rx_ring, i);
1555 bd_status = le32_to_cpu(rxbd->r.lstatus);
1559 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
1560 dma_rmb(); /* for reading other rxbd fields */
1562 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
1567 orig_cleaned_cnt = cleaned_cnt;
1570 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
1571 &cleaned_cnt, &xdp_buff);
1573 /* When set, the outer VLAN header is extracted and reported
1574 * in the receive buffer descriptor, so rx_byte_cnt must also
1575 * account for the length of the extracted VLAN header.
1577 if (bd_status & ENETC_RXBD_FLAG_VLAN)
1578 rx_byte_cnt += VLAN_HLEN;
1579 rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
1581 xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
1585 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
1588 trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
1591 enetc_xdp_drop(rx_ring, orig_i, i);
1595 cleaned_cnt = orig_cleaned_cnt;
1598 skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
1600 ENETC_RXB_DMA_SIZE_XDP);
1604 napi_gro_receive(napi, skb);
1607 tx_ring = priv->xdp_tx_ring[rx_ring->index];
1608 xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
1612 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
1613 enetc_xdp_drop(rx_ring, orig_i, i);
1614 tx_ring->stats.xdp_tx_drops++;
1616 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
1617 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
1619 /* The XDP_TX enqueue was successful, so we
1620 * need to scrub the RX software BDs because
1621 * the ownership of the buffers no longer
1622 * belongs to the RX ring, and we must prevent
1623 * enetc_refill_rx_ring() from reusing rx_swbd->page.
1626 while (orig_i != i) {
1627 rx_ring->rx_swbd[orig_i].page = NULL;
1628 enetc_bdr_idx_inc(rx_ring, &orig_i);
1633 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
1634 if (unlikely(err)) {
1635 enetc_xdp_drop(rx_ring, orig_i, i);
1636 rx_ring->stats.xdp_redirect_failures++;
1638 while (orig_i != i) {
1639 enetc_flip_rx_buff(rx_ring,
1640 &rx_ring->rx_swbd[orig_i]);
1641 enetc_bdr_idx_inc(rx_ring, &orig_i);
1643 xdp_redirect_frm_cnt++;
1644 rx_ring->stats.xdp_redirect++;
1652 rx_ring->next_to_clean = i;
1654 rx_ring->stats.packets += rx_frm_cnt;
1655 rx_ring->stats.bytes += rx_byte_cnt;
1657 if (xdp_redirect_frm_cnt)
1661 enetc_update_tx_ring_tail(tx_ring);
1663 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
1664 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
1665 rx_ring->xdp.xdp_tx_in_flight);
1670 static int enetc_poll(struct napi_struct *napi, int budget)
1672 struct enetc_int_vector
1673 *v = container_of(napi, struct enetc_int_vector, napi);
1674 struct enetc_bdr *rx_ring = &v->rx_ring;
1675 struct bpf_prog *prog;
1676 bool complete = true;
1682 for (i = 0; i < v->count_tx_rings; i++)
1683 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
1686 prog = rx_ring->xdp.prog;
1688 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
1690 work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
1691 if (work_done == budget)
1694 v->rx_napi_work = true;
1697 enetc_unlock_mdio();
1701 napi_complete_done(napi, work_done);
1703 if (likely(v->rx_dim_en))
1704 enetc_rx_net_dim(v);
1706 v->rx_napi_work = false;
1708 /* enable interrupts */
1709 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
1711 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
1712 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
1715 enetc_unlock_mdio();
1720 /* Probing and Init */
1721 #define ENETC_MAX_RFS_SIZE 64
1722 void enetc_get_si_caps(struct enetc_si *si)
1724 struct enetc_hw *hw = &si->hw;
1727 /* find out how many of various resources we have to work with */
1728 val = enetc_rd(hw, ENETC_SICAPR0);
1729 si->num_rx_rings = (val >> 16) & 0xff;
1730 si->num_tx_rings = val & 0xff;
1732 val = enetc_rd(hw, ENETC_SIRFSCAPR);
1733 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
1734 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
1737 val = enetc_rd(hw, ENETC_SIPCAPR0);
1738 if (val & ENETC_SIPCAPR0_RSS) {
1741 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
1742 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
1745 if (val & ENETC_SIPCAPR0_QBV)
1746 si->hw_features |= ENETC_SI_F_QBV;
1748 if (val & ENETC_SIPCAPR0_QBU)
1749 si->hw_features |= ENETC_SI_F_QBU;
1751 if (val & ENETC_SIPCAPR0_PSFP)
1752 si->hw_features |= ENETC_SI_F_PSFP;
1754 EXPORT_SYMBOL_GPL(enetc_get_si_caps);
1756 static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
1758 size_t bd_base_size = res->bd_count * res->bd_size;
1760 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
1761 &res->bd_dma_base, GFP_KERNEL);
1765 /* h/w requires 128B alignment */
1766 if (!IS_ALIGNED(res->bd_dma_base, 128)) {
1767 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
1775 static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
1777 size_t bd_base_size = res->bd_count * res->bd_size;
1779 dma_free_coherent(res->dev, bd_base_size, res->bd_base,
1783 static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
1784 struct device *dev, size_t bd_count)
1789 res->bd_count = bd_count;
1790 res->bd_size = sizeof(union enetc_tx_bd);
1792 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd));
1796 err = enetc_dma_alloc_bdr(res);
1800 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
1801 &res->tso_headers_dma,
1803 if (!res->tso_headers) {
1811 enetc_dma_free_bdr(res);
1813 vfree(res->tx_swbd);
1814 res->tx_swbd = NULL;
1819 static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
1821 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
1822 res->tso_headers, res->tso_headers_dma);
1823 enetc_dma_free_bdr(res);
1824 vfree(res->tx_swbd);
1827 static struct enetc_bdr_resource *
1828 enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
1830 struct enetc_bdr_resource *tx_res;
1833 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
1835 return ERR_PTR(-ENOMEM);
1837 for (i = 0; i < priv->num_tx_rings; i++) {
1838 struct enetc_bdr *tx_ring = priv->tx_ring[i];
1840 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
1850 enetc_free_tx_resource(&tx_res[i]);
1854 return ERR_PTR(err);
1857 static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
1858 size_t num_resources)
1862 for (i = 0; i < num_resources; i++)
1863 enetc_free_tx_resource(&tx_res[i]);
1868 static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
1869 struct device *dev, size_t bd_count,
1875 res->bd_count = bd_count;
1876 res->bd_size = sizeof(union enetc_rx_bd);
1880 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
1884 err = enetc_dma_alloc_bdr(res);
1886 vfree(res->rx_swbd);
1893 static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
1895 enetc_dma_free_bdr(res);
1896 vfree(res->rx_swbd);
1899 static struct enetc_bdr_resource *
1900 enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
1902 struct enetc_bdr_resource *rx_res;
1905 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
1907 return ERR_PTR(-ENOMEM);
1909 for (i = 0; i < priv->num_rx_rings; i++) {
1910 struct enetc_bdr *rx_ring = priv->rx_ring[i];
1912 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
1913 rx_ring->bd_count, extended);
1922 enetc_free_rx_resource(&rx_res[i]);
1926 return ERR_PTR(err);
1929 static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
1930 size_t num_resources)
1934 for (i = 0; i < num_resources; i++)
1935 enetc_free_rx_resource(&rx_res[i]);
1940 static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
1941 const struct enetc_bdr_resource *res)
1943 tx_ring->bd_base = res ? res->bd_base : NULL;
1944 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
1945 tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
1946 tx_ring->tso_headers = res ? res->tso_headers : NULL;
1947 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
1950 static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
1951 const struct enetc_bdr_resource *res)
1953 rx_ring->bd_base = res ? res->bd_base : NULL;
1954 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
1955 rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
1958 static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
1959 const struct enetc_bdr_resource *res)
1964 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
1966 for (i = 0; i < priv->num_tx_rings; i++) {
1967 enetc_assign_tx_resource(priv->tx_ring[i],
1968 res ? &res[i] : NULL);
1974 static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
1975 const struct enetc_bdr_resource *res)
1980 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
1982 for (i = 0; i < priv->num_rx_rings; i++) {
1983 enetc_assign_rx_resource(priv->rx_ring[i],
1984 res ? &res[i] : NULL);
1990 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
1994 for (i = 0; i < tx_ring->bd_count; i++) {
1995 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
1997 enetc_free_tx_frame(tx_ring, tx_swbd);
2001 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
2005 for (i = 0; i < rx_ring->bd_count; i++) {
2006 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
2011 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
2013 __free_page(rx_swbd->page);
2014 rx_swbd->page = NULL;
2018 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
2022 for (i = 0; i < priv->num_rx_rings; i++)
2023 enetc_free_rx_ring(priv->rx_ring[i]);
2025 for (i = 0; i < priv->num_tx_rings; i++)
2026 enetc_free_tx_ring(priv->tx_ring[i]);
2029 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
2034 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
2038 /* Set up RSS table defaults */
2039 for (i = 0; i < si->num_rss; i++)
2040 rss_table[i] = i % num_groups;
2042 enetc_set_rss_table(si, rss_table, si->num_rss);
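/* e.g. num_groups = 4 RX rings with a 64-entry table yields the pattern
 * 0,1,2,3,0,1,2,3,..., spreading flow hashes round-robin across rings.
 */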
2049 int enetc_configure_si(struct enetc_ndev_priv *priv)
2051 struct enetc_si *si = priv->si;
2052 struct enetc_hw *hw = &si->hw;
2055 /* set SI cache attributes */
2056 enetc_wr(hw, ENETC_SICAR0,
2057 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
2058 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
2060 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
2063 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
2070 EXPORT_SYMBOL_GPL(enetc_configure_si);
2072 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
2074 struct enetc_si *si = priv->si;
2075 int cpus = num_online_cpus();
2077 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
2078 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
2080 /* Enable all available TX rings in order to configure as many
2081 * priorities as possible, when needed.
2082 * TODO: Make # of TX rings run-time configurable
2084 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
2085 priv->num_tx_rings = si->num_tx_rings;
2086 priv->bdr_int_num = cpus;
2087 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
2088 priv->tx_ictt = ENETC_TXIC_TIMETHR;
2090 EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
2092 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
2094 struct enetc_si *si = priv->si;
2096 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
2098 if (!priv->cls_rules)
2103 EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
2105 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
2107 kfree(priv->cls_rules);
2109 EXPORT_SYMBOL_GPL(enetc_free_si_resources);
2111 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2113 int idx = tx_ring->index;
2116 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
2117 lower_32_bits(tx_ring->bd_dma_base));
2119 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
2120 upper_32_bits(tx_ring->bd_dma_base));
2122 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
2123 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
2124 ENETC_RTBLENR_LEN(tx_ring->bd_count));
2126 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
2127 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
2128 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
2130 /* enable Tx ints by setting pkt thr to 1 */
2131 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
2133 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
2134 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
2135 tbmr |= ENETC_TBMR_VIH;
2138 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2140 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
2141 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
2142 tx_ring->idr = hw->reg + ENETC_SITXIDR;
2145 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
2148 int idx = rx_ring->index;
2151 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
2152 lower_32_bits(rx_ring->bd_dma_base));
2154 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
2155 upper_32_bits(rx_ring->bd_dma_base));
2157 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
2158 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
2159 ENETC_RTBLENR_LEN(rx_ring->bd_count));
2161 if (rx_ring->xdp.prog)
2162 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
2164 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
2166 /* Also prepare the consumer index in case page allocation never
2167 * succeeds. In that case, hardware will never advance producer index
2168 * to match consumer index, and will drop all frames.
2170 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
2171 enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
2173 /* enable Rx ints by setting pkt thr to 1 */
2174 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
2176 rx_ring->ext_en = extended;
2177 if (rx_ring->ext_en)
2178 rbmr |= ENETC_RBMR_BDS;
2180 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
2181 rbmr |= ENETC_RBMR_VTE;
2183 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
2184 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
2186 rx_ring->next_to_clean = 0;
2187 rx_ring->next_to_use = 0;
2188 rx_ring->next_to_alloc = 0;
2191 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
2192 enetc_unlock_mdio();
2194 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2197 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
2199 struct enetc_hw *hw = &priv->si->hw;
2202 for (i = 0; i < priv->num_tx_rings; i++)
2203 enetc_setup_txbdr(hw, priv->tx_ring[i]);
2205 for (i = 0; i < priv->num_rx_rings; i++)
2206 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
2209 static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2211 int idx = tx_ring->index;
2214 tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
2215 tbmr |= ENETC_TBMR_EN;
2216 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
2219 static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2221 int idx = rx_ring->index;
2224 rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
2225 rbmr |= ENETC_RBMR_EN;
2226 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
2229 static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
2231 struct enetc_hw *hw = &priv->si->hw;
2234 for (i = 0; i < priv->num_tx_rings; i++)
2235 enetc_enable_txbdr(hw, priv->tx_ring[i]);
2237 for (i = 0; i < priv->num_rx_rings; i++)
2238 enetc_enable_rxbdr(hw, priv->rx_ring[i]);
2241 static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2243 int idx = rx_ring->index;
2245 /* disable EN bit on ring */
2246 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
2249 static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
2251 int idx = rx_ring->index;
2253 /* disable EN bit on ring */
2254 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
2257 static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
2259 struct enetc_hw *hw = &priv->si->hw;
2262 for (i = 0; i < priv->num_tx_rings; i++)
2263 enetc_disable_txbdr(hw, priv->tx_ring[i]);
2265 for (i = 0; i < priv->num_rx_rings; i++)
2266 enetc_disable_rxbdr(hw, priv->rx_ring[i]);
2269 static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
2271 int delay = 8, timeout = 100;
2272 int idx = tx_ring->index;
2274 /* wait for busy to clear */
2275 while (delay < timeout &&
2276 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
2281 if (delay >= timeout)
2282 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
2286 static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
2288 struct enetc_hw *hw = &priv->si->hw;
2291 for (i = 0; i < priv->num_tx_rings; i++)
2292 enetc_wait_txbdr(hw, priv->tx_ring[i]);
2295 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2297 struct pci_dev *pdev = priv->si->pdev;
2298 struct enetc_hw *hw = &priv->si->hw;
2301 for (i = 0; i < priv->bdr_int_num; i++) {
2302 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2303 struct enetc_int_vector *v = priv->int_vector[i];
2304 int entry = ENETC_BDR_INT_BASE_IDX + i;
2306 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
2307 priv->ndev->name, i);
2308 err = request_irq(irq, enetc_msix, 0, v->name, v);
2310 dev_err(priv->dev, "request_irq() failed!\n");
2315 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
2316 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
2317 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
2319 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
2321 for (j = 0; j < v->count_tx_rings; j++) {
2322 int idx = v->tx_ring[j].index;
2324 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
2326 irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
2333 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2335 irq_set_affinity_hint(irq, NULL);
2336 free_irq(irq, priv->int_vector[i]);
2342 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
2344 struct pci_dev *pdev = priv->si->pdev;
2347 for (i = 0; i < priv->bdr_int_num; i++) {
2348 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
2350 irq_set_affinity_hint(irq, NULL);
2351 free_irq(irq, priv->int_vector[i]);
2355 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
2357 struct enetc_hw *hw = &priv->si->hw;
2361 /* enable Tx & Rx event indication */
2363 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
2364 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
2365 /* init to non-0 minimum, will be adjusted later */
2368 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
2372 for (i = 0; i < priv->num_rx_rings; i++) {
2373 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
2374 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
2375 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
2378 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
2379 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
2381 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
2383 for (i = 0; i < priv->num_tx_rings; i++) {
2384 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
2385 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
2386 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
2390 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
2392 struct enetc_hw *hw = &priv->si->hw;
2395 for (i = 0; i < priv->num_tx_rings; i++)
2396 enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
2398 for (i = 0; i < priv->num_rx_rings; i++)
2399 enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
2402 static int enetc_phylink_connect(struct net_device *ndev)
2404 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2405 struct ethtool_eee edata;
2408 if (!priv->phylink) {
2410 netif_carrier_on(ndev);
2414 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
2416 dev_err(&ndev->dev, "could not attach to PHY\n");
2420 /* disable EEE autoneg, until ENETC driver supports it */
2421 memset(&edata, 0, sizeof(struct ethtool_eee));
2422 phylink_ethtool_set_eee(priv->phylink, &edata);
2424 phylink_start(priv->phylink);
2429 static void enetc_tx_onestep_tstamp(struct work_struct *work)
2431 struct enetc_ndev_priv *priv;
2432 struct sk_buff *skb;
2434 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
2436 netif_tx_lock_bh(priv->ndev);
2438 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
2439 skb = skb_dequeue(&priv->tx_skbs);
2441 enetc_start_xmit(skb, priv->ndev);
2443 netif_tx_unlock_bh(priv->ndev);
2446 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
2448 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
2449 skb_queue_head_init(&priv->tx_skbs);
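/* Design note: only one one-step Sync frame can be in flight, since its
 * correction data is patched through a single per-port register, so the
 * ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS bit acts as a lock: enetc_xmit()
 * queues followers on tx_skbs and this work item sends the next one
 * once TX completion releases the bit.
 */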
2452 void enetc_start(struct net_device *ndev)
2454 struct enetc_ndev_priv *priv = netdev_priv(ndev);
2457 enetc_setup_interrupts(priv);
2459 for (i = 0; i < priv->bdr_int_num; i++) {
2460 int irq = pci_irq_vector(priv->si->pdev,
2461 ENETC_BDR_INT_BASE_IDX + i);
2463 napi_enable(&priv->int_vector[i]->napi);
2467 enetc_enable_bdrs(priv);
2469 netif_tx_start_all_queues(ndev);
2471 EXPORT_SYMBOL_GPL(enetc_start);
int enetc_open(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr_resource *tx_res, *rx_res;
	bool extended;
	int err;

	extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);

	err = enetc_setup_irqs(priv);
	if (err)
		return err;

	err = enetc_phylink_connect(ndev);
	if (err)
		goto err_phy_connect;

	tx_res = enetc_alloc_tx_resources(priv);
	if (IS_ERR(tx_res)) {
		err = PTR_ERR(tx_res);
		goto err_alloc_tx;
	}

	rx_res = enetc_alloc_rx_resources(priv, extended);
	if (IS_ERR(rx_res)) {
		err = PTR_ERR(rx_res);
		goto err_alloc_rx;
	}

	enetc_tx_onestep_tstamp_init(priv);
	enetc_assign_tx_resources(priv, tx_res);
	enetc_assign_rx_resources(priv, rx_res);
	enetc_setup_bdrs(priv, extended);
	enetc_start(ndev);

	return 0;

err_alloc_rx:
	enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
err_phy_connect:
	enetc_free_irqs(priv);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_open);
void enetc_stop(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	netif_tx_stop_all_queues(ndev);

	enetc_disable_bdrs(priv);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		disable_irq(irq);
		napi_synchronize(&priv->int_vector[i]->napi);
		napi_disable(&priv->int_vector[i]->napi);
	}

	enetc_wait_bdrs(priv);

	enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
int enetc_close(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	enetc_stop(ndev);

	if (priv->phylink) {
		phylink_stop(priv->phylink);
		phylink_disconnect_phy(priv->phylink);
	} else {
		netif_carrier_off(ndev);
	}

	enetc_free_rxtx_rings(priv);

	/* Avoids dangling pointers and also frees old resources */
	enetc_assign_rx_resources(priv, NULL);
	enetc_assign_tx_resources(priv, NULL);

	enetc_free_irqs(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_close);
static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
			     int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
			     void *ctx)
{
	struct enetc_bdr_resource *tx_res, *rx_res;
	int err;

	ASSERT_RTNL();

	/* If the interface is down, run the callback right away,
	 * without reconfiguration.
	 */
	if (!netif_running(priv->ndev)) {
		if (cb) {
			err = cb(priv, ctx);
			if (err)
				return err;
		}

		return 0;
	}

	tx_res = enetc_alloc_tx_resources(priv);
	if (IS_ERR(tx_res)) {
		err = PTR_ERR(tx_res);
		goto out;
	}

	rx_res = enetc_alloc_rx_resources(priv, extended);
	if (IS_ERR(rx_res)) {
		err = PTR_ERR(rx_res);
		goto out_free_tx_res;
	}

	enetc_stop(priv->ndev);
	enetc_free_rxtx_rings(priv);

	/* Interface is down, run optional callback now */
	if (cb) {
		err = cb(priv, ctx);
		if (err)
			goto out_restart;
	}

	enetc_assign_tx_resources(priv, tx_res);
	enetc_assign_rx_resources(priv, rx_res);
	enetc_setup_bdrs(priv, extended);
	enetc_start(priv->ndev);

	return 0;

out_restart:
	enetc_setup_bdrs(priv, extended);
	enetc_start(priv->ndev);
	enetc_free_rx_resources(rx_res, priv->num_rx_rings);
out_free_tx_res:
	enetc_free_tx_resources(tx_res, priv->num_tx_rings);
out:
	return err;
}
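/* A minimal sketch (not part of the driver) of an enetc_reconfigure() user:
 * the optional callback runs while the rings are torn down, so state that
 * the datapath samples can be flipped without racing against NAPI. The
 * "example_ctx" payload is hypothetical; enetc_reconfigure_xdp_cb() further
 * below is the real in-tree user of this pattern.
 */
#if 0
struct example_ctx {
	bool rx_tstamp;		/* hypothetical knob */
};

static int example_toggle_cb(struct enetc_ndev_priv *priv, void *ctx)
{
	struct example_ctx *c = ctx;

	if (c->rx_tstamp)
		priv->active_offloads |= ENETC_F_RX_TSTAMP;
	else
		priv->active_offloads &= ~ENETC_F_RX_TSTAMP;

	return 0;
}

/* caller, with rtnl_lock() held:
 *	err = enetc_reconfigure(priv, c.rx_tstamp, example_toggle_cb, &c);
 */
#endif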
static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
			   priv->tx_ring[i]->prio);
}
void enetc_reset_tc_mqprio(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_bdr *tx_ring;
	int num_stack_tx_queues;
	int i;

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);

	netdev_reset_tc(ndev);
	netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	priv->min_num_stack_tx_queues = num_possible_cpus();

	/* Reset all ring priorities to 0 */
	for (i = 0; i < priv->num_tx_rings; i++) {
		tx_ring = priv->tx_ring[i];
		tx_ring->prio = 0;
		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
	}

	enetc_debug_tx_ring_prios(priv);

	enetc_change_preemptible_tcs(priv, 0);
}
EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio);
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
	struct enetc_hw *hw = &priv->si->hw;
	int num_stack_tx_queues = 0;
	struct enetc_bdr *tx_ring;
	u8 num_tc = qopt->num_tc;
	int offset, count;
	int err, tc, q;

	if (!num_tc) {
		enetc_reset_tc_mqprio(ndev);
		return 0;
	}

	err = netdev_set_num_tc(ndev, num_tc);
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		offset = qopt->offset[tc];
		count = qopt->count[tc];
		num_stack_tx_queues += count;

		err = netdev_set_tc_queue(ndev, tc, count, offset);
		if (err)
			goto err_reset_tc;

		for (q = offset; q < offset + count; q++) {
			tx_ring = priv->tx_ring[q];
			/* The prio_tc_map is skb_tx_hash()'s way of selecting
			 * between TX queues based on skb->priority. As such,
			 * there's nothing to offload based on it.
			 * Make the mqprio "traffic class" be the priority of
			 * this ring group, and leave the Tx IPV to traffic
			 * class mapping as its default mapping value of 1:1.
			 */
			tx_ring->prio = tc;
			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
		}
	}

	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	if (err)
		goto err_reset_tc;

	priv->min_num_stack_tx_queues = num_stack_tx_queues;

	enetc_debug_tx_ring_prios(priv);

	enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);

	return 0;

err_reset_tc:
	enetc_reset_tc_mqprio(ndev);
	return err;
}
EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
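/* Worked example (illustrative only): on a port with 8 TX rings,
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1
 *
 * arrives here with qopt->count = {4, 4} and qopt->offset = {0, 4}, so rings
 * 0-3 keep prio 0 and rings 4-7 are programmed to prio 1 through
 * enetc_set_bdr_prio(), while the prio-to-TC map stays a purely software
 * concern of skb_tx_hash().
 */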
static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
	struct bpf_prog *old_prog, *prog = ctx;
	int num_stack_tx_queues;
	int err, i;

	old_prog = xchg(&priv->xdp_prog, prog);

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
	err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
	if (err) {
		xchg(&priv->xdp_prog, old_prog);
		return err;
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < priv->num_rx_rings; i++) {
		struct enetc_bdr *rx_ring = priv->rx_ring[i];

		rx_ring->xdp.prog = prog;

		if (prog)
			rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
		else
			rx_ring->buffer_offset = ENETC_RXB_PAD;
	}

	return 0;
}
static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
				struct netlink_ext_ack *extack)
{
	int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	bool extended;

	if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
	    priv->num_tx_rings) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
				       num_xdp_tx_queues,
				       priv->min_num_stack_tx_queues,
				       priv->num_tx_rings);
		return -EBUSY;
	}

	extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);

	/* The buffer layout is changing, so we need to drain the old
	 * RX buffers and seed new ones.
	 */
	return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
}
int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_bpf);
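/* Example (illustrative only): attaching a program through this hook, e.g.
 *
 *   ip link set dev eth0 xdp obj prog.o sec xdp
 *
 * lands in enetc_setup_xdp_prog() via XDP_SETUP_PROG. The full ring
 * reconfiguration is needed because XDP buffers must reserve
 * XDP_PACKET_HEADROOM in front of the frame, unlike the default
 * ENETC_RXB_PAD layout.
 */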
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long packets = 0, bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++) {
		packets += priv->rx_ring[i]->stats.packets;
		bytes += priv->rx_ring[i]->stats.bytes;
	}

	stats->rx_packets = packets;
	stats->rx_bytes = bytes;
	bytes = 0;
	packets = 0;

	for (i = 0; i < priv->num_tx_rings; i++) {
		packets += priv->tx_ring[i]->stats.packets;
		bytes += priv->tx_ring[i]->stats.bytes;
		tx_dropped += priv->tx_ring[i]->stats.win_drop;
	}

	stats->tx_packets = packets;
	stats->tx_bytes = bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
static int enetc_set_rss(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);

	reg = enetc_rd(hw, ENETC_SIMR);
	reg &= ~ENETC_SIMR_RSSE;
	reg |= (en) ? ENETC_SIMR_RSSE : 0;
	enetc_wr(hw, ENETC_SIMR, reg);

	return 0;
}
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_bdr_enable_rxvlan(hw, i, en);
}

static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_bdr_enable_txvlan(hw, i, en);
}
void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXHASH)
		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		enetc_enable_rxvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
		enetc_enable_txvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
}
EXPORT_SYMBOL_GPL(enetc_set_features);
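/* Example (illustrative only): these paths are typically exercised through
 * ethtool feature toggles, e.g.
 *
 *   ethtool -K eth0 rxhash off    # NETIF_F_RXHASH -> enetc_set_rss()
 *   ethtool -K eth0 rxvlan on     # NETIF_F_HW_VLAN_CTAG_RX
 *   ethtool -K eth0 txvlan on     # NETIF_F_HW_VLAN_CTAG_TX
 */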
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err, new_offloads = priv->active_offloads;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		break;
	case HWTSTAMP_TX_ON:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		new_offloads |= ENETC_F_TX_TSTAMP;
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
		new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		new_offloads &= ~ENETC_F_RX_TSTAMP;
		break;
	default:
		new_offloads |= ENETC_F_RX_TSTAMP;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
		bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);

		err = enetc_reconfigure(priv, extended, NULL, NULL);
		if (err)
			return err;
	}

	priv->active_offloads = new_offloads;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;

	if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
	else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ON;
	else
		config.tx_type = HWTSTAMP_TX_OFF;

	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (cmd == SIOCSHWTSTAMP)
		return enetc_hwtstamp_set(ndev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return enetc_hwtstamp_get(ndev, rq);
#endif

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
EXPORT_SYMBOL_GPL(enetc_ioctl);
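/* A minimal userspace sketch (not driver code) of driving the hwtstamp
 * handlers above; this is the standard SIOCSHWTSTAMP flow described in
 * Documentation/networking/timestamping.rst, nothing ENETC-specific.
 */
#if 0
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = {};

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)	/* sock_fd: any socket */
		perror("SIOCSHWTSTAMP");
#endif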
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int num_stack_tx_queues;
	int first_xdp_tx_ring;
	int i, n, err, nvec;
	int v_tx_rings;

	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
	/* allocate MSIX for both messaging and Rx/Tx interrupts */
	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

	if (n < 0)
		return n;

	if (n != nvec)
		return -EPERM;

	/* # of tx rings per int vector */
	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v;
		struct enetc_bdr *bdr;
		int j;

		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
		if (!v) {
			err = -ENOMEM;
			goto fail;
		}

		priv->int_vector[i] = v;

		bdr = &v->rx_ring;
		bdr->index = i;
		bdr->ndev = priv->ndev;
		bdr->dev = priv->dev;
		bdr->bd_count = priv->rx_bd_count;
		bdr->buffer_offset = ENETC_RXB_PAD;
		priv->rx_ring[i] = bdr;

		err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
		if (err) {
			kfree(v);
			goto fail;
		}

		err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err) {
			xdp_rxq_info_unreg(&bdr->xdp.rxq);
			kfree(v);
			goto fail;
		}

		/* init defaults for adaptive IC */
		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
			v->rx_ictt = 0x1;
			v->rx_dim_en = true;
		}
		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
		netif_napi_add(priv->ndev, &v->napi, enetc_poll);
		v->count_tx_rings = v_tx_rings;

		for (j = 0; j < v_tx_rings; j++) {
			int idx;

			/* default tx ring mapping policy */
			idx = priv->bdr_int_num * j + i;
			__set_bit(idx, &v->tx_rings_map);
			bdr = &v->tx_ring[j];
			bdr->index = idx;
			bdr->ndev = priv->ndev;
			bdr->dev = priv->dev;
			bdr->bd_count = priv->tx_bd_count;
			priv->tx_ring[idx] = bdr;
		}
	}

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);

	err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
	if (err)
		goto fail;

	err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
	if (err)
		goto fail;

	priv->min_num_stack_tx_queues = num_possible_cpus();
	first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
	priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];

	return 0;

fail:
	while (i--) {
		struct enetc_int_vector *v = priv->int_vector[i];
		struct enetc_bdr *rx_ring = &v->rx_ring;

		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
		kfree(v);
	}

	pci_free_irq_vectors(pdev);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_alloc_msix);
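/* Worked example of the default tx ring mapping above (illustrative only):
 * with bdr_int_num = 2 vectors and v_tx_rings = num_tx_rings / bdr_int_num
 * = 4, idx = bdr_int_num * j + i interleaves the rings across vectors:
 *
 *   vector 0 (i = 0): tx rings 0, 2, 4, 6
 *   vector 1 (i = 1): tx rings 1, 3, 5, 7
 *
 * so consecutive ring indices are serviced by different interrupt vectors,
 * and hence (with the affinity hints set in enetc_setup_irqs()) by
 * different CPUs.
 */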
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v = priv->int_vector[i];
		struct enetc_bdr *rx_ring = &v->rx_ring;

		xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
		xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
	}

	for (i = 0; i < priv->num_rx_rings; i++)
		priv->rx_ring[i] = NULL;

	for (i = 0; i < priv->num_tx_rings; i++)
		priv->tx_ring[i] = NULL;

	for (i = 0; i < priv->bdr_int_num; i++) {
		kfree(priv->int_vector[i]);
		priv->int_vector[i] = NULL;
	}

	/* disable all MSIX for this device */
	pci_free_irq_vectors(priv->si->pdev);
}
EXPORT_SYMBOL_GPL(enetc_free_msix);
static void enetc_kfree_si(struct enetc_si *si)
{
	char *p = (char *)si - si->pad;

	kfree(p);
}
static void enetc_detect_errata(struct enetc_si *si)
{
	if (si->pdev->revision == ENETC_REV1)
		si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
	struct enetc_si *si, *p;
	struct enetc_hw *hw;
	size_t alloc_size;
	int err, len;

	pcie_flr(pdev);
	err = pci_enable_device_mem(pdev);
	if (err)
		return dev_err_probe(&pdev->dev, err, "device enable failed\n");

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
		goto err_pci_mem_reg;
	}

	pci_set_master(pdev);

	alloc_size = sizeof(struct enetc_si);
	if (sizeof_priv) {
		/* align priv to 32B */
		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* force 32B alignment for enetc_si */
	alloc_size += ENETC_SI_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto err_alloc_si;
	}

	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
	si->pad = (char *)si - (char *)p;

	pci_set_drvdata(pdev, si);
	si->pdev = pdev;
	hw = &si->hw;

	len = pci_resource_len(pdev, ENETC_BAR_REGS);
	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
	if (!hw->reg) {
		err = -ENXIO;
		dev_err(&pdev->dev, "ioremap() failed\n");
		goto err_ioremap;
	}
	if (len > ENETC_PORT_BASE)
		hw->port = hw->reg + ENETC_PORT_BASE;
	if (len > ENETC_GLOBAL_BASE)
		hw->global = hw->reg + ENETC_GLOBAL_BASE;

	enetc_detect_errata(si);

	return 0;

err_ioremap:
	enetc_kfree_si(si);
err_alloc_si:
	pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
	pci_disable_device(pdev);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_pci_probe);
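/* Worked example of the alignment math above (illustrative only): if
 * kzalloc() returns p ending in 0x08 and ENETC_SI_ALIGN is 32, then
 * PTR_ALIGN() rounds si up to the next 0x20 boundary and si->pad stores the
 * 0x18-byte offset, which enetc_kfree_si() subtracts again so that kfree()
 * sees the original allocation pointer.
 */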
void enetc_pci_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_hw *hw = &si->hw;

	iounmap(hw->reg);
	enetc_kfree_si(si);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");