/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}
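
/* Per-SQ FIFO recording each DMA mapping (addr/size/type) in post
 * order, so completions can unmap without re-walking the skb.
 */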
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

        sq->db.dma_fifo[i].addr = addr;
        sq->db.dma_fifo[i].size = size;
        sq->db.dma_fifo[i].type = map_type;
        sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}
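
/* With DSCP trust state, the 6-bit DSCP field of the IP header indexes
 * the dscp2prio table to select the egress priority.
 */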
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
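
/* Select a txq: the fallback hash picks the channel, the user priority
 * (DSCP or VLAN PCP) picks the TC, and channel_tc2txq maps the pair to
 * the real txq index.
 */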
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (channel_ix >= num_channels)
                channel_ix = reciprocal_scale(channel_ix, num_channels);

        return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}
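
/* Compute how many header bytes must be copied inline into the WQE,
 * according to the SQ's min-inline mode: none, the L2 header, or all
 * headers up to and including L3/L4.
 */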
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb)) {
                        hlen = mlx5e_skb_l3_header_offset(skb);
                        break;
                }
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
{
        *skb_len -= len;
        *skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
                                     unsigned char **skb_data,
                                     unsigned int *skb_len)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, *skb_data, cpy1_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
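
/* Translate the skb checksum request into eth-segment csum flags,
 * adding inner L3/L4 flags for encapsulated (tunneled) packets.
 */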
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats.csum_partial++;
                }
        } else
                sq->stats.csum_none++;
}

static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                           struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
        u16 ihs;

        eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                sq->stats.tso_inner_packets++;
                sq->stats.tso_inner_bytes += skb->len - ihs;
        } else {
                ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += skb->len - ihs;
        }

        /* on the wire, each extra GSO segment repeats the headers */
        *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        return ihs;
}
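
/* DMA-map the linear part and every page fragment of the skb, filling
 * one data segment per mapping. Returns the number of mappings, or
 * -ENOMEM after unwinding the mappings pushed so far.
 */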
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma          = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;

dma_unmap_wqe_err:
        mlx5e_dma_unmap_wqe_err(sq, num_dma);
        return -ENOMEM;
}
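
/* Finalize the WQE: fill in the control segment and wqe_info, account
 * bytes with BQL, stop the queue when stop-room runs out, ring the
 * doorbell unless more skbs are pending, and pad the ring edge with
 * NOPs so no WQE wraps around.
 */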
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi;

        wi->num_bytes = num_bytes;
        wi->num_dma = num_dma;
        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        wi->skb = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        netdev_tx_sent_queue(sq->txq, num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
                sq->db.wqe_info[pi].skb = NULL;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                sq->stats.nop++;
        }
}
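
/* Build and post one send WQE for an Ethernet skb: eth segment
 * (checksum, GSO or min-inline headers, VLAN), data segments, then
 * completion bookkeeping.
 */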
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                                 struct mlx5e_tx_wqe *wqe, u16 pi)
{
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
                sq->stats.packets += skb_shinfo(skb)->gso_segs;
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                sq->stats.packets++;
        }
        sq->stats.bytes += num_bytes;
        sq->stats.xmit_more += skb->xmit_more;

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                if (skb_vlan_tag_present(skb)) {
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
                        ihs += VLAN_HLEN;
                        sq->stats.added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb_data, ihs);
                        mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                }
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                sq->stats.added_vlan_packets++;
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
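
/* ndo_start_xmit: resolve the SQ from the skb queue mapping; on
 * IPsec-enabled SQs the skb may first be transformed (or consumed).
 */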
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
        if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
                skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
                if (unlikely(!skb))
                        return NETDEV_TX_OK;
        }
#endif

        return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
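
/* NAPI TX completion: every CQE completes all WQEs up to its
 * wqe_counter. Unmap DMA, deliver HW timestamps, free skbs, update
 * BQL, and wake the txq once stop-room is available again.
 */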
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                hwts.hwtstamp =
                                        mlx5_timecounter_cyc2time(sq->clock,
                                                                  get_cqe_ts(cqe));
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
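
/* Drain a deactivated SQ: unmap and free all WQEs still in flight. */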
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = sq->cc & sq->wq.sz_m1;
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}
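
/* IPoIB TX WQEs carry an additional datagram (address vector) segment
 * plus padding; mlx5i_sq_xmit() otherwise mirrors mlx5e_sq_xmit(),
 * minus VLAN insertion.
 */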
#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
        struct mlx5_wqe_ctrl_seg     ctrl;
        struct mlx5_wqe_datagram_seg datagram;
        struct mlx5_wqe_eth_pad      pad;
        struct mlx5_wqe_eth_seg      eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
        struct mlx5_wq_cyc       *wq   = &sq->wq;
        u16                       pi   = sq->pc & wq->sz_m1;
        struct mlx5i_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
        struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
        struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        memset(wqe, 0, sizeof(*wqe));

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
                sq->stats.packets += skb_shinfo(skb)->gso_segs;
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                sq->stats.packets++;
        }

        sq->stats.bytes += num_bytes;
        sq->stats.xmit_more += skb->xmit_more;

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb_data, ihs);
                mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

#endif