/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
#else
/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
 * enough room for a resync SKB, a normal SKB and a NOP
 */
#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
#endif
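
/* Undo a single DMA mapping recorded in the SQ's dma fifo, using the unmap
 * call that matches how the buffer was originally mapped.
 */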
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}
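
/* The dma fifo is a power-of-two ring that records every DMA mapping taken
 * for in-flight WQEs; producer/consumer counters are masked into it below.
 */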
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

        dma->addr = addr;
        dma->size = size;
        dma->type = map_type;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}
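
/* With DSCP trust mode, the transmit priority is taken from the IP header's
 * DSCP field and translated through the dcbx dscp2prio table.
 */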
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
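
/* ndo_select_queue: pick a channel via the stack's fallback hash, derive the
 * user priority (DSCP or VLAN PCP), and map both to a TX queue index.
 */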
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev,
                       select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb, NULL);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (channel_ix >= num_channels)
                channel_ix = reciprocal_scale(channel_ix, num_channels);

        return priv->channel_tc2txq[channel_ix][up];
}
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}
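
/* Compute how many header bytes must be inlined in the WQE ethernet segment
 * for the SQ's configured inline mode, capped by the skb linear length.
 */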
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb)) {
                        hlen = mlx5e_skb_l3_header_offset(skb);
                        break;
                }
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb_headlen(skb));
}
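
/* Copy the headers into the WQE inline part while inserting an 802.1Q tag
 * after the MAC addresses, since the inlined copy bypasses HW VLAN insertion.
 */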
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, skb->data, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
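
/* Request L3/L4 checksum offload flags in the ethernet segment for
 * CHECKSUM_PARTIAL skbs, including inner headers for encapsulated traffic.
 */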
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats->csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
        } else
                sq->stats->csum_none++;
}
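
/* For GSO, the inline header size covers everything up to and including the
 * (inner-most) transport header, and the TSO stats are updated accordingly.
 */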
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        u16 ihs;

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                        ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
                else
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                stats->tso_packets++;
                stats->tso_bytes += skb->len - ihs;
        }

        return ihs;
}
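
/* DMA-map the skb linear part and each page fragment, fill one data segment
 * per mapping, and record the mappings in the dma fifo for later unmap.
 * Returns the number of mappings taken, or a negative value on error.
 */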
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;
                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);
                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;
                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);
                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;

dma_unmap_wqe_err:
        mlx5e_dma_unmap_wqe_err(sq, num_dma);
        return -ENOMEM;
}
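
/* A WQE must not wrap around the end of a WQ fragment (page); pad the
 * remainder of the fragment with NOP WQEBBs before posting such a WQE.
 */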
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
                                           struct mlx5_wq_cyc *wq,
                                           u16 pi, u16 frag_pi)
{
        struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
        u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;

        edge_wi = wi + nnops;

        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->skb        = NULL;
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
        sq->stats->nop += nnops;
}
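
/* Finalize a TX WQE: fill the completion bookkeeping, write the control
 * segment, advance the producer counter, stop the queue if room runs low,
 * and ring the doorbell unless more skbs are expected (xmit_more).
 */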
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        wi->num_bytes  = num_bytes;
        wi->num_dma    = num_dma;
        wi->num_wqebbs = num_wqebbs;
        wi->skb        = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        netdev_tx_sent_queue(sq->txq, num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats->stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
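
/* Build and post one send WQE for an skb: compute inline header and data
 * segment counts, handle LSO/VLAN, map the payload, then complete the WQE.
 */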
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg  *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 ds_cnt, ds_cnt_inl = 0;
        u16 headlen, ihs, frag_pi;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode    = MLX5_OPCODE_LSO;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes     += num_bytes;
        stats->xmit_more += skb->xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
        if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
                mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
        }

        /* fill wqe */
        wi   = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        eseg = &wqe->eth;
        dseg =  wqe->data;

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                if (skb_vlan_tag_present(skb)) {
                        ihs -= VLAN_HLEN;
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
                        stats->added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb->data, ihs);
                }
                dseg += ds_cnt_inl;
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                stats->added_vlan_packets++;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
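
/* ndo_start_xmit entry point: look up the SQ for the selected queue, fetch a
 * WQE slot, run the acceleration offload hooks (which may consume the skb),
 * and hand off to mlx5e_sq_xmit().
 */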
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_tx_wqe *wqe;
        struct mlx5e_txqsq *sq;
        u16 pi;

        sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        mlx5e_sq_fetch_wqe(sq, &wqe, &pi);

        /* might send skbs and update wqe and pi */
        skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
        if (unlikely(!skb))
                return NETDEV_TX_OK;

        return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
{
        u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);

        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
                   sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
                   err_cqe->vendor_err_synd);
        mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}
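
/* NAPI TX completion handler: drain up to MLX5E_TX_CQ_POLL_BUDGET CQEs,
 * unmap and free the completed skbs, trigger SQ recovery on error CQEs, and
 * wake the queue once enough room has been freed. Returns true if the budget
 * was exhausted (more work remains).
 */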
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        stats = sq->stats;
        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
                                              &sq->state)) {
                                mlx5e_dump_error_cqe(sq,
                                                     (struct mlx5_err_cqe *)cqe);
                                queue_work(cq->channel->priv->wq,
                                           &sq->recover.recover_work);
                        }
                }

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                hwts.hwtstamp =
                                        mlx5_timecounter_cyc2time(sq->clock,
                                                                  get_cqe_ts(cqe));
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
                                   MLX5E_SQ_STOP_ROOM) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
                stats->wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
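
/* Called on SQ teardown/recovery: walk all outstanding WQEs, unmap their DMA
 * buffers and free the skbs without generating completions.
 */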
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}
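
/* IPoIB (mlx5i) TX path: same WQE construction as the Ethernet path, plus a
 * datagram segment carrying the address vector, destination QPN and Q_Key.
 */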
#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5i_tx_wqe *wqe;

        struct mlx5_wqe_datagram_seg *datagram;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg  *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 headlen, ihs, pi, frag_pi;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode    = MLX5_OPCODE_LSO;
                mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
                ihs       = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes     += num_bytes;
        stats->xmit_more += skb->xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
        if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
                mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
        }

        mlx5i_sq_fetch_wqe(sq, &wqe, &pi);

        /* fill wqe */
        wi       = &sq->db.wqe_info[pi];
        cseg     = &wqe->ctrl;
        datagram = &wqe->datagram;
        eseg     = &wqe->eth;
        dseg     =  wqe->data;

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb->data, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                dseg += ds_cnt_inl;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
#endif