// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
	MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
	MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
	salt    = info->salt;    \
	rec_seq = info->rec_seq; \
	salt_sz    = sizeof(info->salt);    \
	rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

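/* Populate the TLS static params context that is sent to the device in a
 * UMR WQE: the GCM salt (implicit IV), the initial record number, the TLS
 * version, and the DEK (key) index of this connection.
 */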
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	char *initial_rn, *gcm_iv;
	u16 salt_sz, rec_seq_sz;
	char *salt, *rec_seq;
	u8 tls_version;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	EXTRACT_INFO_FIELDS;

	gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

	memcpy(gcm_iv,      salt,    salt_sz);
	memcpy(initial_rn,  rec_seq, rec_seq_sz);

	tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

	MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
	MLX5_SET(tls_static_params, ctx, const_1, 1);
	MLX5_SET(tls_static_params, ctx, const_2, 2);
	MLX5_SET(tls_static_params, ctx, encryption_standard,
		 MLX5E_ENCRYPTION_STANDARD_TLS);
	MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

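/* Build a UMR WQE that carries the static params context inline, addressed
 * to the connection's TIS via the immediate field.
 */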
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
		    struct mlx5e_ktls_offload_context_tx *priv_tx,
		    bool fence)
{
	struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
					     (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     STATIC_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
	cseg->imm              = cpu_to_be32(priv_tx->tisn);

	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

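/* Populate the TLS progress params context: associate it with the TIS and
 * start the device's record tracker in its initial (start, no-offload) state.
 */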
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
	MLX5_SET(tls_progress_params, ctx, auth_state,
		 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

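/* Build a SET_PSV WQE that carries the progress params context for the TIS. */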
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode =
		cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
			    (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     PROGRESS_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fill_progress_params_ctx(wqe->data, priv_tx);
}

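/* Record bookkeeping info for a posted WQE so completion processing knows
 * how many WQEBBs it spans and which resync dump frag (if any) to release.
 */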
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs,
		       skb_frag_t *resync_dump_frag)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	wi->skb              = NULL;
	wi->num_wqebbs       = num_wqebbs;
	wi->resync_dump_frag = resync_dump_frag;
}

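/* The "pending" flag requests that the connection's static/progress param
 * WQEs be posted before the next data packet is transmitted on its SQ.
 */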
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

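/* post_static_params() and post_progress_params() fetch room on the SQ,
 * build the corresponding control WQE, and advance the producer counter.
 */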
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;

	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

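/* Post the progress params WQE, preceded by the static params WQE unless
 * skip_static_post is set. Fencing of the posted WQEs is controlled by
 * fence_first_post.
 */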
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

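/* Resync info describing the portion of an already-transmitted TLS record
 * that must be replayed (dumped) to the device before an out-of-order
 * packet can be sent.
 */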
struct tx_sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t *frags[MAX_SKB_FRAGS];
};

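/* Look up the TLS record that contains tcp_seq and collect the record frags
 * covering the bytes from the record start up to tcp_seq. Returns false if
 * no record is found (e.g. a retransmission of already-acked data).
 */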
static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
			     u32 tcp_seq, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = false;
		goto out;
	}

	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
		if (!tls_record_is_start_marker(record))
			ret = false;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		remaining -= skb_frag_size(frag);
		info->frags[i++] = frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

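/* Re-post the param WQEs with the record number of the record being resynced.
 * The static params WQE (which holds the record number) is skipped when the
 * stored record sequence is already up to date.
 */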
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

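/* Post a DUMP WQE that replays one frag of already-sent record data to the
 * device (headers inlined, payload as a single mapped data segment) so it
 * can catch up to the resync point.
 */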
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt, ds_cnt_inl;
	u8  num_wqebbs;
	u16 pi, ihs;
	int fsz;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ihs    = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
	ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
	ds_cnt += ds_cnt_inl;
	ds_cnt += 1; /* one frag */

	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->imm              = cpu_to_be32(tisn);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	eseg->inline_hdr.sz = cpu_to_be16(ihs);
	memcpy(eseg->inline_hdr.start, skb->data, ihs);
	dseg += ds_cnt_inl;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, num_wqebbs, frag);
	sq->pc += num_wqebbs;

	WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
	     "unexpected DUMP num_wqebbs, %d > %d",
	     num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);

	return 0;
}

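/* Completion handler for a DUMP WQE: unmap the frag's DMA, drop the
 * reference taken in tx_sync_info_get(), and update the dump stats.
 */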
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   struct mlx5e_sq_dma *dma)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	__skb_frag_unref(wi->resync_dump_frag);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

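/* Post a fenced NOP WQE, used when resync posted no DUMP WQEs but the
 * following data WQE still has to wait for the param WQEs.
 */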
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

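/* Handle a packet that does not start at the expected TCP sequence (e.g. a
 * retransmission): re-post the params with the record number of the affected
 * record, dump the already-sent part of that record, and only then let the
 * SKB go out. Drops the SKB when resync is not possible.
 */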
static struct sk_buff *
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 struct sk_buff *skb,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct tx_sync_info info = {};
	u16 contig_wqebbs_room, pi;
	u8 num_wqebbs;
	int i;

	if (!tx_sync_info_get(priv_tx, seq, &info)) {
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;
		int headln;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			return skb;

		stats->tls_drop_bypass_req++;
		goto err_out;
	}

	stats->tls_ooo++;

	num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
		(info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++)
		if (tx_post_resync_dump(sq, skb, info.frags[i],
					priv_tx->tisn, !i))
			goto err_out;

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags)
		tx_post_fence_nop(sq);

	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}

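/* TX offload entry point, called from the driver's xmit path for every SKB.
 * Keeps the device TIS context in sync with the connection (posting param
 * WQEs on the first packet and handling out-of-order sequences), points the
 * data WQE at the TIS, and updates the TLS TX stats.
 */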
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
		if (unlikely(!skb))
			goto out;
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
	}

	priv_tx->expected_seq = seq + datalen;

	cseg = &(*wqe)->ctrl;
	cseg->imm = cpu_to_be32(priv_tx->tisn);

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}