/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);

	return mlx5e_skb_l2_header_offset(skb);
}

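/* Number of header bytes that must be copied into the WQE's inline part for
 * the SQ's minimum inline mode; the result is capped at the skb linear part.
 */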
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

#define MLX5_UNSAFE_MEMCPY_DISCLAIMER \
	"This copy has been bounds-checked earlier in " \
	"mlx5i_sq_calc_wqe_attr() and intentionally " \
	"crosses a flex array boundary. Since it is " \
	"performance sensitive, splitting the copy is " \
	"undesirable."

103 static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
105 struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
106 int cpy1_sz = 2 * ETH_ALEN;
107 int cpy2_sz = ihs - cpy1_sz;
109 memcpy(&vhdr->addrs, skb->data, cpy1_sz);
110 vhdr->h_vlan_proto = skb->vlan_proto;
111 vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
112 unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto,
115 MLX5_UNSAFE_MEMCPY_DISCLAIMER);
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
		return;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}

/* Returns the number of header bytes that we plan
 * to inline later in the transmit descriptor
 */
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	*hopbyhop = 0;
	if (skb->encapsulation) {
		ihs = skb_inner_tcp_all_headers(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		} else {
			ihs = skb_tcp_all_headers(skb);
			if (ipv6_has_hopopt_jumbo(skb)) {
				*hopbyhop = sizeof(struct hop_jumbo_hdr);
				ihs -= sizeof(struct hop_jumbo_hdr);
			}
		}
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs - *hopbyhop;
	}

	return ihs;
}

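/* DMA-map the skb linear part (headlen bytes at skb_data) and every page
 * fragment, and fill one data segment per mapping. Returns the number of
 * mappings pushed to the DMA fifo, or -ENOMEM after unwinding on error.
 */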
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

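/* Per-packet send attributes, computed once in mlx5e_sq_xmit_prepare() and
 * then used to size (mlx5e_sq_calc_wqe_attr()) and fill the WQE.
 */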
struct mlx5e_tx_attr {
	/* Member list reconstructed from how the struct is used in this file. */
	__be16 mss;
	u16 insz;
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	u8 opcode;
	u8 hopbyhop;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		int hopbyhop;
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_LSO,
			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs       = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen   = skb_headlen(skb) - ihs - hopbyhop,
			.hopbyhop  = hopbyhop,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_SEND,
			.mss       = cpu_to_be16(0),
			.ihs       = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

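/* Translate the send attributes into WQE sizing: a WQE is built from 16-byte
 * data segments (DS), and each 64-byte WQE basic block (WQEBB) holds four of
 * them. The DS count starts from the empty WQE size and grows with the inline
 * headers, the accel (TLS/IPsec) ids and one DS per DMA mapping; num_wqebbs is
 * the rounded-up number of basic blocks the WQE occupies.
 */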
static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	/* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);
	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
			netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
				    (u16)MLX5E_MAX_TX_INLINE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	/* Must not be called when a MPWQE session is active but empty. */
	mlx5e_tx_mpwqe_ensure_complete(sq);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = 1,
	};

	wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}

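/* Finalize a posted WQE: record the completion bookkeeping in wqe_info, fill
 * the control segment, advance the SQ producer counter, stop the txq when it
 * runs out of room, handle PTP metadata tracking and ring the doorbell unless
 * xmit_more allows the notification to be deferred.
 */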
static void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq &&
		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
		u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);

		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
					   metadata_index);
		mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
		if (!netif_tx_queue_stopped(sq->txq) &&
		    mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
			netif_tx_stop_queue(sq->txq);
			sq->stats->stopped++;
		}
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 ihs = attr->ihs;
	struct ipv6hdr *h6;
	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	eseg->mss = attr->mss;

	if (ihs) {
		u8 *start = eseg->inline_hdr.start;

		if (unlikely(attr->hopbyhop)) {
			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			if (skb_vlan_tag_present(skb)) {
				mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
				ihs += VLAN_HLEN;
				h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
			} else {
				unsafe_memcpy(start, skb->data,
					      ETH_HLEN + sizeof(*h6),
					      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
				h6 = (struct ipv6hdr *)(start + ETH_HLEN);
			}
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			memcpy(h6 + 1,
			       skb->data + ETH_HLEN + sizeof(*h6) +
					sizeof(struct hop_jumbo_hdr),
			       tcp_hdrlen(skb));
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(start, skb, ihs);
			ihs += VLAN_HLEN;
			stats->added_vlan_packets++;
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr->ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}
		eseg->inline_hdr.sz |= cpu_to_be16(ihs);
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
					     be32_to_cpu(eseg->flow_table_metadata));
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

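/* Enhanced Multi-Packet WQE (MPWQE) TX path: consecutive linear skbs that
 * share the same eth segment are aggregated into one send WQE, with a single
 * data segment per packet, amortizing the control segment and doorbell cost.
 */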
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz && !mlx5e_macsec_skb_is_offload(skb);
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}

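/* Add one linear skb to the current MPWQE session, opening a new session when
 * none is active or when the eth segment differs from the running one, and
 * closing the session early once the WQE is full.
 */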
static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
	mlx5e_tx_mpwqe_add_dseg(sq, &txd);
	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

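/* For PTP SQs, stamp the eth segment with a metadata index popped from the
 * free list, so the port timestamp CQE can later be matched back to this skb.
 */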
static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
				 struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		eseg->flow_table_metadata =
			cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
}

static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
	if (unlikely(sq->ptpsq))
		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}

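/* Main transmit entry point (ndo_start_xmit): resolve the SQ from the txq
 * mapping, run the accel (TLS/IPsec) hooks, and dispatch the skb either to the
 * MPWQE path or to the regular single-WQE path.
 */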
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	/* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
	 * queue being changed is disabled, and smp_wmb guarantees that the
	 * changes are visible before mlx5e_xmit tries to read from txq2sq. It
	 * guarantees that the value of txq2sq[qid] doesn't change while
	 * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
	 * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
	 */
	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		/* Two cases when sq can be NULL:
		 * 1. The HTB node is registered, and mlx5e_select_queue
		 * selected its queue ID, but the SQ itself is not yet created.
		 * 2. HTB SQ creation failed. Similar to the previous case, but
		 * the SQ won't be created.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
{
	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats->wake++;
	}
}

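/* TX completion handling: poll up to MLX5E_TX_CQ_POLL_BUDGET CQEs, free the
 * skbs and DMA mappings of every completed WQE, report error CQEs and trigger
 * recovery, then sync the BQL counters and wake the txq if it was stopped.
 */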
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;
	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);
				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);
				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->workqueue, &sq->recover_work);
			}
			stats->cqe_err++;
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	mlx5e_txqsq_wake(sq);

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

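/* Drain a deactivated SQ: release every posted but not yet completed WQE
 * (skbs, fifo packets and DMA mappings) and sync the BQL counters.
 */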
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);
			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);
			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB

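/* IPoIB (mlx5i) variant of the TX datapath: the WQE carries an additional
 * datagram (address vector) segment and there is no VLAN or MPWQE handling.
 */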
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		if (unlikely(attr.hopbyhop)) {
			struct ipv6hdr *h6;

			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      ETH_HLEN + sizeof(*h6),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			unsafe_memcpy(h6 + 1,
				      skb->data + ETH_HLEN + sizeof(*h6) +
						sizeof(struct hop_jumbo_hdr),
				      tcp_hdrlen(skb),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr.ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}

		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}
#endif /* CONFIG_MLX5_CORE_IPOIB */