/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"

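/* Metadata syndromes, as used by this file: OFFLOAD_REQUIRED marks an
 * in-sequence packet the device should encrypt; SYNC marks a resync
 * packet that replays record data so the device can rebuild its crypto
 * state for an out-of-sequence transmission.
 */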
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33

struct sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct mlx5e_tls_metadata {
	/* One byte of syndrome followed by 3 bytes of swid */
	__be32 syndrome_swid;
	__be16 first_seq;
	/* packet type ID field */
	__be16 ethertype;
} __packed;

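/* Prepend the device metadata header right after the Ethernet header:
 * the MAC addresses are moved up to make room, h_proto is switched to
 * MLX5E_METADATA_ETHER_TYPE, and the syndrome plus the 3-byte SW id of
 * the offload context are written into the new header.
 */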
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
	struct mlx5e_tls_metadata *pet;
	struct ethhdr *eth;

	if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
		return -ENOMEM;

	eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
	skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
	pet = (struct mlx5e_tls_metadata *)(eth + 1);

	memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
	pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;

	return 0;
}

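/* Find the TLS record containing tcp_seq and take references on the
 * fragments holding the record bytes that precede it; these are the
 * "sync" bytes the device must be shown to rebuild its crypto state.
 * The size of the last fragment is trimmed so the frags cover exactly
 * info->sync_len bytes. A negative sync_len means tcp_seq lies before
 * the record start, which is only valid for the start marker record.
 */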
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
				   u32 tcp_seq, struct sync_info *info)
{
	int remaining, i = 0, ret = -EINVAL;
	struct tls_record_info *record;
	unsigned long flags;
	s32 sync_size;

	spin_lock_irqsave(&context->base.lock, flags);
	record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);

	if (unlikely(!record))
		goto out;

	sync_size = tcp_seq - tls_record_start_seq(record);
	info->sync_len = sync_size;
	if (unlikely(sync_size < 0)) {
		if (tls_record_is_start_marker(record))
			goto done;

		goto out;
	}

	remaining = sync_size;
	while (remaining > 0) {
		info->frags[i] = record->frags[i];
		__skb_frag_ref(&info->frags[i]);
		remaining -= skb_frag_size(&info->frags[i]);

		if (remaining < 0)
			skb_frag_size_add(&info->frags[i], remaining);

		i++;
	}
	info->nr_frags = i;
done:
	ret = 0;
out:
	spin_unlock_irqrestore(&context->base.lock, flags);
	return ret;
}

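/* Finish a resync skb whose linear part already holds room for the
 * headers and the record serial number: copy the headers from the
 * original skb, fix up the IP total length, rewind the TCP sequence to
 * cover the replayed bytes, set up GSO in case the payload exceeds the
 * device MTU, and stamp the metadata header with SYNDROME_SYNC.
 */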
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
					struct sk_buff *nskb, u32 tcp_seq,
					int headln, __be64 rcd_sn)
{
	struct mlx5e_tls_metadata *pet;
	u8 syndrome = SYNDROME_SYNC;
	struct iphdr *iph;
	struct tcphdr *th;
	int data_len, mss;

	nskb->dev = skb->dev;
	skb_reset_mac_header(nskb);
	skb_set_network_header(nskb, skb_network_offset(skb));
	skb_set_transport_header(nskb, skb_transport_offset(skb));
	memcpy(nskb->data, skb->data, headln);
	memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
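
	/* The sync payload (record serial number + replayed record frags)
	 * ends where the out-of-order packet begins, so the TCP sequence
	 * is rewound by data_len to sit directly before it.
	 */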
	iph = ip_hdr(nskb);
	iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
	th = tcp_hdr(nskb);
	data_len = nskb->len - headln;
	tcp_seq -= data_len;
	th->seq = htonl(tcp_seq);

	mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
	skb_shinfo(nskb)->gso_size = 0;
	if (data_len > mss) {
		skb_shinfo(nskb)->gso_size = mss;
		skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
	}
	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;

	pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
	memcpy(pet, &syndrome, sizeof(syndrome));
	pet->first_seq = htons(tcp_seq);

	/* MLX5 devices don't care about the checksum partial start, offset
	 * and pseudo header
	 */
	nskb->ip_summed = CHECKSUM_PARTIAL;

	nskb->xmit_more = 1;
	nskb->queue_mapping = skb->queue_mapping;
}

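/* Handle a packet whose TCP sequence does not match the context's
 * expected sequence. Depending on where it falls relative to the
 * current TLS record it is passed through unmodified (payload wholly
 * before the record start, so no offload is required), dropped, or
 * sent behind a dedicated resync packet that replays the start of the
 * record to the device.
 */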
static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     struct mlx5e_tx_wqe **wqe,
		     u16 *pi,
		     struct mlx5e_tls *tls)
{
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct sync_info info;
	struct sk_buff *nskb;
	int linear_len = 0;
	int headln;
	int i;

	sq->stats.tls_ooo++;

	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			/* SKB payload doesn't require offload
			 */
			return skb;

		atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
		goto err_out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
		goto err_out;
	}

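	/* Build the resync skb: a linear part with room for the copied
	 * headers plus the record serial number, and the replayed record
	 * bytes attached as page fragments. It is transmitted immediately,
	 * ahead of the original out-of-order skb.
	 */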
	headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
	linear_len += headln + sizeof(info.rcd_sn);
	nskb = alloc_skb(linear_len, GFP_ATOMIC);
	if (unlikely(!nskb)) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
		goto err_out;
	}

	context->expected_seq = tcp_seq + skb->len - headln;
	skb_put(nskb, linear_len);
	for (i = 0; i < info.nr_frags; i++)
		skb_shinfo(nskb)->frags[i] = info.frags[i];

	skb_shinfo(nskb)->nr_frags = info.nr_frags;
	nskb->data_len = info.sync_len;
	nskb->len += info.sync_len;
	sq->stats.tls_resync_bytes += nskb->len;
	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
				    cpu_to_be64(info.rcd_sn));
	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
	mlx5e_sq_fetch_wqe(sq, wqe, pi);
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}
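
/* TX hook for TLS device offload, called from the mlx5e xmit path. In
 * the common in-sequence case it only prepends the metadata header;
 * out-of-sequence packets are diverted through mlx5e_tls_handle_ooo().
 * Returns the skb to transmit, or NULL if it was dropped.
 */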
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_tx_wqe **wqe,
					u16 *pi)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context *context;
	struct tls_context *tls_ctx;
	u32 expected_seq;
	int datalen;
	u32 skb_seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (unlikely(tls_ctx->netdev != netdev))
		goto out;

	skb_seq = ntohl(tcp_hdr(skb)->seq);
	context = mlx5e_get_tls_tx_context(tls_ctx);
	expected_seq = context->expected_seq;

	if (unlikely(expected_seq != skb_seq)) {
		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi,
					   priv->tls);
		goto out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
		dev_kfree_skb_any(skb);
		skb = NULL;
		goto out;
	}

	context->expected_seq = skb_seq + datalen;
out:
	return skb;
}