Merge tag 'dma-mapping-4.20-3' of git://git.infradead.org/users/hch/dma-mapping
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / fpga / ipsec.c
1 /*
2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33
34 #include <linux/rhashtable.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/fs_helpers.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/rbtree.h>
39
40 #include "mlx5_core.h"
41 #include "fs_cmd.h"
42 #include "fpga/ipsec.h"
43 #include "fpga/sdk.h"
44 #include "fpga/core.h"
45
/* Lifecycle states of a single in-flight IPSec command to the FPGA. */
enum mlx5_fpga_ipsec_cmd_status {
        MLX5_FPGA_IPSEC_CMD_PENDING,    /* sent, waiting for a response */
        MLX5_FPGA_IPSEC_CMD_SEND_FAIL,  /* QP send failed; no response will come */
        MLX5_FPGA_IPSEC_CMD_COMPLETE,   /* response received and parsed */
};
51
52 struct mlx5_fpga_ipsec_cmd_context {
53         struct mlx5_fpga_dma_buf buf;
54         enum mlx5_fpga_ipsec_cmd_status status;
55         struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
56         int status_code;
57         struct completion complete;
58         struct mlx5_fpga_device *dev;
59         struct list_head list; /* Item in pending_cmds */
60         u8 command[0];
61 };
62
63 struct mlx5_fpga_esp_xfrm;
64
/* Software context for one hardware SA programmed into the FPGA.
 * Linked into mlx5_fpga_ipsec->sa_hash, keyed by @hw_sa (minus the cmd
 * field -- see rhash_sa below).
 */
struct mlx5_fpga_ipsec_sa_ctx {
        struct rhash_head               hash;
        struct mlx5_ifc_fpga_ipsec_sa   hw_sa;
        struct mlx5_core_dev            *dev;
        struct mlx5_fpga_esp_xfrm       *fpga_xfrm;      /* owning xfrm */
};
71
/* FPGA-specific wrapper around an accel ESP xfrm.
 * Several flow rules may share one xfrm; @num_rules counts them and
 * @sa_ctx is the single SA they all share (NULL until first rule binds).
 */
struct mlx5_fpga_esp_xfrm {
        unsigned int                    num_rules;      /* rules bound to sa_ctx */
        struct mlx5_fpga_ipsec_sa_ctx   *sa_ctx;
        struct mutex                    lock; /* xfrm lock */
        struct mlx5_accel_esp_xfrm      accel_xfrm;
};
78
/* Node in mlx5_fpga_ipsec->rules_rb associating a flow-steering entry
 * with the SA context created for it.
 */
struct mlx5_fpga_ipsec_rule {
        struct rb_node                  node;
        struct fs_fte                   *fte;   /* rb-tree search key */
        struct mlx5_fpga_ipsec_sa_ctx   *ctx;
};
84
/* Parameters for sa_hash: maps the raw hardware SA image to its
 * mlx5_fpga_ipsec_sa_ctx, so duplicate SAs can be detected.
 */
static const struct rhashtable_params rhash_sa = {
        /* Keep out "cmd" field from the key as its
         * value is not constant during the lifetime
         * of the key object.
         */
        .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
                   FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
        .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
                      FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
        .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
        .automatic_shrinking = true,
        .min_size = 1,
};
98
/* Per-FPGA-device IPSec offload state. */
struct mlx5_fpga_ipsec {
        struct mlx5_fpga_device *fdev;
        struct list_head pending_cmds;  /* commands awaiting a response, FIFO */
        spinlock_t pending_cmds_lock; /* Protects pending_cmds */
        u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];    /* extended cap dump */
        struct mlx5_fpga_conn *conn;    /* SBU connection used for commands */

        struct notifier_block   fs_notifier_ingress_bypass;
        struct notifier_block   fs_notifier_egress;

        /* Map hardware SA           -->  SA context
         *     (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
         * We will use this hash to avoid SAs duplication in fpga which
         * aren't allowed
         */
        struct rhashtable sa_hash;      /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
        struct mutex sa_hash_lock;      /* serializes sa_hash insert/remove */

        /* Tree holding all rules for this fpga device
         * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
         */
        struct rb_root rules_rb;
        struct mutex rules_rb_lock; /* rules lock */
};
123
124 static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
125 {
126         if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
127                 return false;
128
129         if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
130             MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
131                 return false;
132
133         if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
134             MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
135                 return false;
136
137         return true;
138 }
139
140 static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
141                                           struct mlx5_fpga_device *fdev,
142                                           struct mlx5_fpga_dma_buf *buf,
143                                           u8 status)
144 {
145         struct mlx5_fpga_ipsec_cmd_context *context;
146
147         if (status) {
148                 context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
149                                        buf);
150                 mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
151                                status);
152                 context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
153                 complete(&context->complete);
154         }
155 }
156
157 static inline
158 int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
159 {
160         switch (syndrome) {
161         case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
162                 return 0;
163         case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
164                 return -EEXIST;
165         case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
166                 return -EINVAL;
167         case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
168                 return -EIO;
169         }
170         return -EIO;
171 }
172
/* RX callback for the FPGA IPSec connection.
 *
 * Pops the oldest pending command, records the parsed response status in
 * its context, and wakes the waiter. The context itself is freed by the
 * command's issuer, not here.
 *
 * NOTE(review): pairing response to command relies on the FPGA answering
 * commands strictly in FIFO order -- confirm against device spec.
 */
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
        struct mlx5_fpga_ipsec_cmd_context *context;
        enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
        struct mlx5_fpga_device *fdev = cb_arg;
        unsigned long flags;

        /* Drop malformed (too short) responses. */
        if (buf->sg[0].size < sizeof(*resp)) {
                mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
                               buf->sg[0].size, sizeof(*resp));
                return;
        }

        mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
                      ntohl(resp->syndrome));

        /* Dequeue the oldest pending command under the lock. */
        spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
        context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
                                           struct mlx5_fpga_ipsec_cmd_context,
                                           list);
        if (context)
                list_del(&context->list);
        spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

        if (!context) {
                mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
                return;
        }
        mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

        /* Publish the result before signalling completion. */
        syndrome = ntohl(resp->syndrome);
        context->status_code = syndrome_to_errno(syndrome);
        context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
        memcpy(&context->resp, resp, sizeof(*resp));

        if (context->status_code)
                mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
                               syndrome);

        complete(&context->complete);
}
215
/* Allocate, queue and send one IPSec command to the FPGA.
 *
 * @cmd/@cmd_size: command payload, copied into the context; size must be
 *                 a multiple of 4 bytes.
 *
 * Returns the command context on success -- the caller must wait on it
 * via mlx5_fpga_ipsec_cmd_wait() and then kfree() it -- or an ERR_PTR.
 *
 * The send happens while holding pending_cmds_lock so that the list
 * order matches the wire order, which the recv path depends on.
 */
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
                                      const void *cmd, int cmd_size)
{
        struct mlx5_fpga_ipsec_cmd_context *context;
        struct mlx5_fpga_device *fdev = mdev->fpga;
        unsigned long flags;
        int res;

        if (!fdev || !fdev->ipsec)
                return ERR_PTR(-EOPNOTSUPP);

        /* Commands are 4-byte granular. */
        if (cmd_size & 3)
                return ERR_PTR(-EINVAL);

        /* GFP_ATOMIC: may be called from contexts that cannot sleep. */
        context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
        if (!context)
                return ERR_PTR(-ENOMEM);

        context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
        context->dev = fdev;
        context->buf.complete = mlx5_fpga_ipsec_send_complete;
        init_completion(&context->complete);
        memcpy(&context->command, cmd, cmd_size);
        context->buf.sg[0].size = cmd_size;
        context->buf.sg[0].data = &context->command;

        /* Send under the lock so list order == send order (FIFO pairing). */
        spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
        res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
        if (!res)
                list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
        spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

        if (res) {
                mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
                kfree(context);
                return ERR_PTR(res);
        }

        /* Context should be freed by the caller after completion. */
        return context;
}
257
258 static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
259 {
260         struct mlx5_fpga_ipsec_cmd_context *context = ctx;
261         unsigned long timeout =
262                 msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
263         int res;
264
265         res = wait_for_completion_timeout(&context->complete, timeout);
266         if (!res) {
267                 mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
268                 return -ETIMEDOUT;
269         }
270
271         if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
272                 res = context->status_code;
273         else
274                 res = -EIO;
275
276         return res;
277 }
278
279 static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
280 {
281         if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
282                 return true;
283         return false;
284 }
285
/* Program @hw_sa into the FPGA with the given ADD/DEL @opcode and wait
 * for the response.
 *
 * v2 devices get the full SA structure; v1 devices only the v1 prefix.
 * On success the response's sw_sa_handle is cross-checked against the
 * one we sent. Returns 0 or a negative errno.
 */
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
                                        struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
                                        int opcode)
{
        struct mlx5_core_dev *dev = fdev->mdev;
        struct mlx5_ifc_fpga_ipsec_sa *sa;
        struct mlx5_fpga_ipsec_cmd_context *cmd_context;
        size_t sa_cmd_size;
        int err;

        hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
        /* Only send as much of the SA as the device understands. */
        if (is_v2_sadb_supported(fdev->ipsec))
                sa_cmd_size = sizeof(*hw_sa);
        else
                sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

        cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
                        mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
        if (IS_ERR(cmd_context))
                return PTR_ERR(cmd_context);

        err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
        if (err)
                goto out;

        /* Sanity check: the response must echo the SA handle we sent. */
        sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
        if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
                mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
                              ntohl(sa->ipsec_sa_v1.sw_sa_handle),
                              ntohl(cmd_context->resp.sw_sa_handle));
                err = -EIO;
        }

out:
        kfree(cmd_context);
        return err;
}
323
324 u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
325 {
326         struct mlx5_fpga_device *fdev = mdev->fpga;
327         u32 ret = 0;
328
329         if (mlx5_fpga_is_ipsec_device(mdev)) {
330                 ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
331                 ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
332         } else {
333                 return ret;
334         }
335
336         if (!fdev->ipsec)
337                 return ret;
338
339         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
340                 ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
341
342         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
343                 ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
344
345         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
346                 ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
347
348         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
349                 ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
350
351         if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
352                 ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
353                 ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
354         }
355
356         return ret;
357 }
358
359 unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
360 {
361         struct mlx5_fpga_device *fdev = mdev->fpga;
362
363         if (!fdev || !fdev->ipsec)
364                 return 0;
365
366         return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
367                         number_of_ipsec_counters);
368 }
369
/* Read the FPGA IPSec counters into @counters as host-endian u64 values.
 *
 * At most @counters_count values are copied; the device may expose fewer
 * or more (callers should size the buffer from
 * mlx5_fpga_ipsec_counters_count()). Returns 0 on success or a negative
 * errno.
 */
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
                                  unsigned int counters_count)
{
        struct mlx5_fpga_device *fdev = mdev->fpga;
        unsigned int i;
        __be32 *data;
        u32 count;
        u64 addr;
        int ret;

        if (!fdev || !fdev->ipsec)
                return 0;

        /* The counters base address is split across two 32-bit cap fields. */
        addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
                             ipsec_counters_addr_low) +
               ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
                             ipsec_counters_addr_high) << 32);

        count = mlx5_fpga_ipsec_counters_count(mdev);

        /* Two big-endian 32-bit words per 64-bit counter. */
        data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
                                 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
        if (ret < 0) {
                mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
                              ret);
                goto out;
        }
        ret = 0;

        /* Never copy more than the caller's buffer can hold. */
        if (count > counters_count)
                count = counters_count;

        /* Each counter is low word, then high. But each word is big-endian */
        for (i = 0; i < count; i++)
                counters[i] = (u64)ntohl(data[i * 2]) |
                              ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
        kfree(data);
        return ret;
}
417
/* Issue a SET_CAP command enabling @flags and verify the device
 * acknowledged every requested flag. Returns 0 or a negative errno.
 */
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
        struct mlx5_fpga_ipsec_cmd_context *context;
        struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
        int err;

        cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
        cmd.flags = htonl(flags);
        context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
        if (IS_ERR(context))
                return PTR_ERR(context);

        err = mlx5_fpga_ipsec_cmd_wait(context);
        if (err)
                goto out;

        /* All requested flag bits must be echoed back as enabled.
         * NOTE(review): both values below are big-endian on the wire and
         * are logged without byte-swapping -- confirm that is intended.
         */
        if ((context->resp.flags & cmd.flags) != cmd.flags) {
                mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
                              cmd.flags,
                              context->resp.flags);
                err = -EIO;
        }

out:
        kfree(context);
        return err;
}
445
446 static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
447 {
448         u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
449         u32 flags = 0;
450
451         if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
452                 flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
453
454         return mlx5_fpga_ipsec_set_caps(mdev, flags);
455 }
456
457 static void
458 mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
459                               const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
460                               struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
461 {
462         const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
463
464         /* key */
465         memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
466                aes_gcm->key_len / 8);
467         /* Duplicate 128 bit key twice according to HW layout */
468         if (aes_gcm->key_len == 128)
469                 memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
470                        aes_gcm->aes_key, aes_gcm->key_len / 8);
471
472         /* salt and seq_iv */
473         memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
474                sizeof(aes_gcm->seq_iv));
475         memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
476                sizeof(aes_gcm->salt));
477
478         /* esn */
479         if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
480                 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
481                 hw_sa->ipsec_sa_v1.flags |=
482                                 (xfrm_attrs->flags &
483                                  MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
484                                         MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
485                 hw_sa->esn = htonl(xfrm_attrs->esn);
486         } else {
487                 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
488                 hw_sa->ipsec_sa_v1.flags &=
489                                 ~(xfrm_attrs->flags &
490                                   MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
491                                         MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
492                 hw_sa->esn = 0;
493         }
494
495         /* rx handle */
496         hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
497
498         /* enc mode */
499         switch (aes_gcm->key_len) {
500         case 128:
501                 hw_sa->ipsec_sa_v1.enc_mode =
502                         MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
503                 break;
504         case 256:
505                 hw_sa->ipsec_sa_v1.enc_mode =
506                         MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
507                 break;
508         }
509
510         /* flags */
511         hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
512                         MLX5_FPGA_IPSEC_SA_SPI_EN |
513                         MLX5_FPGA_IPSEC_SA_IP_ESP;
514
515         if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
516                 hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
517         else
518                 hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
519 }
520
/* Build a complete hardware SA: xfrm-derived fields plus the addressing
 * triple (source IP, destination IP, SPI) and the IPv6 flag.
 *
 * @saddr/@daddr are 4-word arrays in network byte order; for IPv4 only
 * the last word is meaningful. @spi is stored as-is (big-endian).
 */
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
                            struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
                            const __be32 saddr[4],
                            const __be32 daddr[4],
                            const __be32 spi, bool is_ipv6,
                            struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
        mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

        /* IPs */
        memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
        memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

        /* SPI */
        hw_sa->ipsec_sa_v1.spi = spi;

        /* flags */
        if (is_ipv6)
                hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
542
543 static bool is_full_mask(const void *p, size_t len)
544 {
545         WARN_ON(len % 4);
546
547         return !memchr_inv(p, 0xff, len);
548 }
549
/* The FPGA can only offload exact-match rules: source IP, destination IP
 * and the ESP SPI must all use full (all-ones) masks. Returns true when
 * that holds.
 *
 * The else branch assumes the flow is IPv6 -- callers guarantee the flow
 * is either outer IPv4 or outer IPv6 before calling here.
 */
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
                                    const u32 *match_c,
                                    const u32 *match_v)
{
        const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
                                                 match_c,
                                                 misc_parameters);
        const void *headers_c = MLX5_ADDR_OF(fte_match_param,
                                             match_c,
                                             outer_headers);
        const void *headers_v = MLX5_ADDR_OF(fte_match_param,
                                             match_v,
                                             outer_headers);

        if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
                /* IPv4: both address masks must be /32. */
                const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                                    headers_c,
                                                    src_ipv4_src_ipv6.ipv4_layout.ipv4);
                const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                                    headers_c,
                                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

                if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
                                                              ipv4)) ||
                    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
                                                              ipv4)))
                        return false;
        } else {
                /* IPv6: both address masks must be /128. */
                const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                                    headers_c,
                                                    src_ipv4_src_ipv6.ipv6_layout.ipv6);
                const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                                    headers_c,
                                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

                if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
                                                              ipv6)) ||
                    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
                                                              ipv6)))
                        return false;
        }

        /* The ESP SPI must also be matched exactly. */
        if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
                                       outer_esp_spi),
                          MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
                return false;

        return true;
}
599
/* Decide whether a flow rule is eligible for FPGA IPSec offload:
 * it must match on outer headers of a plain IPv4/IPv6 flow (no UDP, TCP
 * or VXLAN), the device must advertise the needed capabilities, and all
 * relevant masks must be full (exact match).
 */
static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
                                    u8 match_criteria_enable,
                                    const u32 *match_c,
                                    const u32 *match_v)
{
        u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
        bool ipv6_flow;

        ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

        /* Must be an outer-header IPv4 or IPv6 flow, with no L4/tunnel match. */
        if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
            mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
            mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
            mlx5_fs_is_vxlan_flow(match_c) ||
            !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
              ipv6_flow))
                return false;

        if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
                return false;

        if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
            mlx5_fs_is_outer_ipsec_flow(match_c))
                return false;

        if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
            ipv6_flow)
                return false;

        if (!validate_fpga_full_mask(dev, match_c, match_v))
                return false;

        return true;
}
634
635 static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
636                                            u8 match_criteria_enable,
637                                            const u32 *match_c,
638                                            const u32 *match_v,
639                                            struct mlx5_flow_act *flow_act)
640 {
641         const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
642                                            outer_headers);
643         bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
644                         MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
645         bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
646                         MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
647         int ret;
648
649         ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
650                                       match_v);
651         if (!ret)
652                 return ret;
653
654         if (is_dmac || is_smac ||
655             (match_criteria_enable &
656              ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
657             (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
658              (flow_act->flags & FLOW_ACT_HAS_TAG))
659                 return false;
660
661         return true;
662 }
663
/* Create (or reuse) the hardware SA context for @accel_xfrm with the
 * given addresses and SPI.
 *
 * If the xfrm is already bound to an SA, the new rule must describe an
 * identical SA image and the existing context is shared (num_rules acts
 * as a refcount). Otherwise the candidate SA is inserted into sa_hash
 * (insertion failure means a different xfrm already owns an identical SA,
 * which is not supported) and then programmed into the FPGA.
 *
 * Returns the SA context or an ERR_PTR. Lock order: fpga_xfrm->lock
 * first, then fipsec->sa_hash_lock.
 */
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
                                    struct mlx5_accel_esp_xfrm *accel_xfrm,
                                    const __be32 saddr[4],
                                    const __be32 daddr[4],
                                    const __be32 spi, bool is_ipv6)
{
        struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
        struct mlx5_fpga_esp_xfrm *fpga_xfrm =
                        container_of(accel_xfrm, typeof(*fpga_xfrm),
                                     accel_xfrm);
        struct mlx5_fpga_device *fdev = mdev->fpga;
        struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
        int opcode, err;
        void *context;

        /* alloc SA */
        sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
        if (!sa_ctx)
                return ERR_PTR(-ENOMEM);

        sa_ctx->dev = mdev;

        /* build candidate SA */
        mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
                                    saddr, daddr, spi, is_ipv6,
                                    &sa_ctx->hw_sa);

        mutex_lock(&fpga_xfrm->lock);

        if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
                /* all rules must be with same IPs and SPI */
                if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
                           sizeof(sa_ctx->hw_sa))) {
                        context = ERR_PTR(-EINVAL);
                        goto exists;
                }

                ++fpga_xfrm->num_rules;
                context = fpga_xfrm->sa_ctx;
                goto exists;
        }

        /* This is unbounded fpga_xfrm, try to add to hash */
        mutex_lock(&fipsec->sa_hash_lock);

        err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
                                            rhash_sa);
        if (err) {
                /* Can't bound different accel_xfrm to already existing sa_ctx.
                 * This is because we can't support multiple ketmats for
                 * same IPs and SPI
                 */
                context = ERR_PTR(-EEXIST);
                goto unlock_hash;
        }

        /* Bound accel_xfrm to sa_ctx */
        opcode = is_v2_sadb_supported(fdev->ipsec) ?
                        MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
                        MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
        err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
        /* Clear the command word so hw_sa matches the hash key layout. */
        sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
        if (err) {
                context = ERR_PTR(err);
                goto delete_hash;
        }

        mutex_unlock(&fipsec->sa_hash_lock);

        ++fpga_xfrm->num_rules;
        fpga_xfrm->sa_ctx = sa_ctx;
        sa_ctx->fpga_xfrm = fpga_xfrm;

        mutex_unlock(&fpga_xfrm->lock);

        return sa_ctx;

delete_hash:
        WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
                                       rhash_sa));
unlock_hash:
        mutex_unlock(&fipsec->sa_hash_lock);

exists:
        mutex_unlock(&fpga_xfrm->lock);
        kfree(sa_ctx);
        return context;
}
752
753 static void *
754 mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
755                                  struct fs_fte *fte,
756                                  bool is_egress)
757 {
758         struct mlx5_accel_esp_xfrm *accel_xfrm;
759         __be32 saddr[4], daddr[4], spi;
760         struct mlx5_flow_group *fg;
761         bool is_ipv6 = false;
762
763         fs_get_obj(fg, fte->node.parent);
764         /* validate */
765         if (is_egress &&
766             !mlx5_is_fpga_egress_ipsec_rule(mdev,
767                                             fg->mask.match_criteria_enable,
768                                             fg->mask.match_criteria,
769                                             fte->val,
770                                             &fte->action))
771                 return ERR_PTR(-EINVAL);
772         else if (!mlx5_is_fpga_ipsec_rule(mdev,
773                                           fg->mask.match_criteria_enable,
774                                           fg->mask.match_criteria,
775                                           fte->val))
776                 return ERR_PTR(-EINVAL);
777
778         /* get xfrm context */
779         accel_xfrm =
780                 (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
781
782         /* IPs */
783         if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
784                                        fte->val)) {
785                 memcpy(&saddr[3],
786                        MLX5_ADDR_OF(fte_match_set_lyr_2_4,
787                                     fte->val,
788                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
789                                     sizeof(saddr[3]));
790                 memcpy(&daddr[3],
791                        MLX5_ADDR_OF(fte_match_set_lyr_2_4,
792                                     fte->val,
793                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
794                                     sizeof(daddr[3]));
795         } else {
796                 memcpy(saddr,
797                        MLX5_ADDR_OF(fte_match_param,
798                                     fte->val,
799                                     outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
800                                     sizeof(saddr));
801                 memcpy(daddr,
802                        MLX5_ADDR_OF(fte_match_param,
803                                     fte->val,
804                                     outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
805                                     sizeof(daddr));
806                 is_ipv6 = true;
807         }
808
809         /* SPI */
810         spi = MLX5_GET_BE(typeof(spi),
811                           fte_match_param, fte->val,
812                           misc_parameters.outer_esp_spi);
813
814         /* create */
815         return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
816                                              saddr, daddr,
817                                              spi, is_ipv6);
818 }
819
/* Remove an SA from the FPGA device and from the driver's SA hashtable.
 * If the device command fails, the entry is deliberately left in sa_hash
 * (the function returns early after WARN_ON), keeping SW state in sync
 * with the SA the hardware still holds.
 */
static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	/* v2 SADB has its own delete opcode; fall back to v1 otherwise. */
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	/* clear the command field so hw_sa can be reused/compared later */
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		WARN_ON(err);
		return;
	}

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}
842
843 void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
844 {
845         struct mlx5_fpga_esp_xfrm *fpga_xfrm =
846                         ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
847
848         mutex_lock(&fpga_xfrm->lock);
849         if (!--fpga_xfrm->num_rules) {
850                 mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
851                 fpga_xfrm->sa_ctx = NULL;
852         }
853         mutex_unlock(&fpga_xfrm->lock);
854 }
855
856 static inline struct mlx5_fpga_ipsec_rule *
857 _rule_search(struct rb_root *root, struct fs_fte *fte)
858 {
859         struct rb_node *node = root->rb_node;
860
861         while (node) {
862                 struct mlx5_fpga_ipsec_rule *rule =
863                                 container_of(node, struct mlx5_fpga_ipsec_rule,
864                                              node);
865
866                 if (rule->fte < fte)
867                         node = node->rb_left;
868                 else if (rule->fte > fte)
869                         node = node->rb_right;
870                 else
871                         return rule;
872         }
873         return NULL;
874 }
875
/* Lock-protected wrapper around _rule_search(). */
static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}
887
/* Insert @rule into the rb-tree keyed by its fte pointer (caller holds
 * rules_rb_lock via rule_insert()).  Nodes with a smaller fte pointer
 * live in the left subtree.
 *
 * Return: 0 on success, -EEXIST if an entry for the same fte exists.
 */
static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}
914
/* Lock-protected wrapper around _rule_insert(). */
static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}
926
/* Unlink @rule from the rb-tree under rules_rb_lock; does NOT free it
 * (rule_delete() does).
 */
static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}
936
/* Unlink @rule from the tree and free it. */
static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}
943
/* FTE fields stripped by modify_spec_mailbox() before calling the default
 * FW commands, saved here so restore_spec_mailbox() can put them back.
 */
struct mailbox_mod {
	uintptr_t			saved_esp_id;
	u32				saved_action;
	u32				saved_outer_esp_spi_value;
};
949
950 static void restore_spec_mailbox(struct fs_fte *fte,
951                                  struct mailbox_mod *mbox_mod)
952 {
953         char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
954                                            fte->val,
955                                            misc_parameters);
956
957         MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
958                  mbox_mod->saved_outer_esp_spi_value);
959         fte->action.action |= mbox_mod->saved_action;
960         fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
961 }
962
/* Save and strip the FPGA-only fields from an FTE before it is passed to
 * the default FW command: the xfrm pointer stashed in esp_id, the
 * ENCRYPT/DECRYPT action bits, and — when the device cannot match on
 * outer_esp_spi — the SPI match value itself.
 */
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	/* only clear the SPI if firmware cannot match on it anyway */
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}
986
987 static enum fs_flow_table_type egress_to_fs_ft(bool egress)
988 {
989         return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
990 }
991
/* Create a flow group, working around devices that cannot match on
 * outer_esp_spi: the SPI mask is transiently cleared from the mailbox
 * (and the MISC_PARAMETERS criteria bit dropped when nothing else in the
 * misc block is masked), then both are restored after the FW command.
 */
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id,
					   bool is_egress)
{
	/* default (non-FPGA) create_flow_group command for this table type */
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	/* device matches on SPI natively: nothing to hide */
	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(dev, ft, in, group_id);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	/* With SPI cleared, the misc block is all-zero iff its first byte is
	 * zero and every byte equals its predecessor — if so, the misc
	 * criteria bit can be dropped entirely.
	 */
	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(dev, ft, in, group_id);

	/* restore the caller's mailbox */
	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}
1033
/* Create an FTE, offloading ESP crypto rules to the FPGA.
 * Rules without an esp_id or without ENCRYPT/DECRYPT action bits go
 * straight to the default FW command.  For crypto rules: a HW SA context
 * is created from the FTE, an fte->rule mapping is recorded in the rules
 * rb-tree, and the FPGA-only fields are stripped from the mailbox around
 * the FW call.
 */
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	/* default (non-FPGA) create_fte command for this table type */
	int (*create_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(dev, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);
		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	/* hide the crypto action/esp_id (and maybe the SPI) from firmware */
	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(dev, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		/* unwind: drop the rb-tree entry, then release the SA */
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}
1083
/* Update an FTE via the default FW command, transiently stripping the
 * FPGA-only fields (esp_id, crypto action bits, possibly the SPI match
 * value) from the mailbox for ESP crypto rules.
 */
static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	/* default (non-FPGA) update_fte command for this table type */
	int (*update_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  unsigned int group_id,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(dev, ft, group_id, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(dev, ft, group_id, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}
1113
/* Delete an FTE.  For ESP crypto rules, the SA context and the rb-tree
 * bookkeeping created by fpga_ipsec_fs_create_fte() are released first,
 * then the default FW command is called with the FPGA-only fields
 * stripped from the mailbox.
 */
static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	/* default (non-FPGA) delete_fte command for this table type */
	int (*delete_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(dev, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(dev, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}
1149
/* Egress (NIC TX) trampoline: fixes is_egress=true for the shared helper. */
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
}
1158
/* Egress (NIC TX) trampoline: fixes is_egress=true for the shared helper. */
static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
}
1167
/* Egress (NIC TX) trampoline: fixes is_egress=true for the shared helper. */
static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     unsigned int group_id,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					true);
}
1178
/* Egress (NIC TX) trampoline: fixes is_egress=true for the shared helper. */
static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
}
1186
/* Ingress (NIC RX) trampoline: fixes is_egress=false for the shared helper. */
static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
}
1195
/* Ingress (NIC RX) trampoline: fixes is_egress=false for the shared helper. */
static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
}
1204
/* Ingress (NIC RX) trampoline: fixes is_egress=false for the shared helper. */
static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      unsigned int group_id,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					false);
}
1215
/* Ingress (NIC RX) trampoline: fixes is_egress=false for the shared helper. */
static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
}
1223
/* Command tables populated by mlx5_fpga_ipsec_build_fs_cmds(). */
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;
1226
1227 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
1228 {
1229         switch (type) {
1230         case FS_FT_NIC_RX:
1231                 return &fpga_ipsec_ingress;
1232         case FS_FT_NIC_TX:
1233                 return &fpga_ipsec_egress;
1234         default:
1235                 WARN_ON(true);
1236                 return NULL;
1237         }
1238 }
1239
/* Initialize FPGA IPsec: query SBU capabilities, create the command
 * connection, and set up the SA hashtable and rules rb-tree.  No-op on
 * devices without FPGA IPsec.  Resources are unwound in reverse order on
 * failure (goto chain).
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	/* QP connection used to send SA commands to the FPGA SBU */
	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}
1308
1309 static void destroy_rules_rb(struct rb_root *root)
1310 {
1311         struct mlx5_fpga_ipsec_rule *r, *tmp;
1312
1313         rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
1314                 rb_erase(&r->node, root);
1315                 mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
1316                 kfree(r);
1317         }
1318 }
1319
/* Tear down FPGA IPsec state created by mlx5_fpga_ipsec_init(): the rules
 * tree (releasing each SA), the SA hashtable, and the SBU command
 * connection.  No-op on devices without FPGA IPsec.
 */
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}
1334
/* Populate the static fpga_ipsec_ingress/egress command tables: group and
 * FTE operations are intercepted by the FPGA-aware wrappers above, while
 * everything else forwards to the default commands for the corresponding
 * NIC RX/TX table type.
 */
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		 mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}
1377
1378 static int
1379 mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
1380                                   const struct mlx5_accel_esp_xfrm_attrs *attrs)
1381 {
1382         if (attrs->tfc_pad) {
1383                 mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
1384                 return -EOPNOTSUPP;
1385         }
1386
1387         if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
1388                 mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
1389                 return -EOPNOTSUPP;
1390         }
1391
1392         if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
1393                 mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
1394                 return -EOPNOTSUPP;
1395         }
1396
1397         if (attrs->keymat.aes_gcm.iv_algo !=
1398             MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
1399                 mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
1400                 return -EOPNOTSUPP;
1401         }
1402
1403         if (attrs->keymat.aes_gcm.icv_len != 128) {
1404                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
1405                 return -EOPNOTSUPP;
1406         }
1407
1408         if (attrs->keymat.aes_gcm.key_len != 128 &&
1409             attrs->keymat.aes_gcm.key_len != 256) {
1410                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1411                 return -EOPNOTSUPP;
1412         }
1413
1414         if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
1415             (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
1416                        v2_command))) {
1417                 mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
1418                 return -EOPNOTSUPP;
1419         }
1420
1421         return 0;
1422 }
1423
1424 struct mlx5_accel_esp_xfrm *
1425 mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
1426                           const struct mlx5_accel_esp_xfrm_attrs *attrs,
1427                           u32 flags)
1428 {
1429         struct mlx5_fpga_esp_xfrm *fpga_xfrm;
1430
1431         if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
1432                 mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
1433                 return ERR_PTR(-EINVAL);
1434         }
1435
1436         if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
1437                 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
1438                 return ERR_PTR(-EOPNOTSUPP);
1439         }
1440
1441         fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
1442         if (!fpga_xfrm)
1443                 return ERR_PTR(-ENOMEM);
1444
1445         mutex_init(&fpga_xfrm->lock);
1446         memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
1447                sizeof(fpga_xfrm->accel_xfrm.attrs));
1448
1449         return &fpga_xfrm->accel_xfrm;
1450 }
1451
/* Free an xfrm created by mlx5_fpga_esp_create_xfrm().  The caller must
 * guarantee no sa_ctx is still bound to it (see the num_rules accounting
 * in mlx5_fpga_ipsec_delete_sa_ctx()).
 */
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);
	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}
1460
1461 int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
1462                               const struct mlx5_accel_esp_xfrm_attrs *attrs)
1463 {
1464         struct mlx5_core_dev *mdev = xfrm->mdev;
1465         struct mlx5_fpga_device *fdev = mdev->fpga;
1466         struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
1467         struct mlx5_fpga_esp_xfrm *fpga_xfrm;
1468         struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
1469
1470         int err = 0;
1471
1472         if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
1473                 return 0;
1474
1475         if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
1476                 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
1477                 return -EOPNOTSUPP;
1478         }
1479
1480         if (is_v2_sadb_supported(fipsec)) {
1481                 mlx5_core_warn(mdev, "Modify esp is not supported\n");
1482                 return -EOPNOTSUPP;
1483         }
1484
1485         fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
1486
1487         mutex_lock(&fpga_xfrm->lock);
1488
1489         if (!fpga_xfrm->sa_ctx)
1490                 /* Unbounded xfrm, chane only sw attrs */
1491                 goto change_sw_xfrm_attrs;
1492
1493         /* copy original hw sa */
1494         memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
1495         mutex_lock(&fipsec->sa_hash_lock);
1496         /* remove original hw sa from hash */
1497         WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1498                                        &fpga_xfrm->sa_ctx->hash, rhash_sa));
1499         /* update hw_sa with new xfrm attrs*/
1500         mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
1501                                       &fpga_xfrm->sa_ctx->hw_sa);
1502         /* try to insert new hw_sa to hash */
1503         err = rhashtable_insert_fast(&fipsec->sa_hash,
1504                                      &fpga_xfrm->sa_ctx->hash, rhash_sa);
1505         if (err)
1506                 goto rollback_sa;
1507
1508         /* modify device with new hw_sa */
1509         err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
1510                                            MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
1511         fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
1512         if (err)
1513                 WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
1514                                                &fpga_xfrm->sa_ctx->hash,
1515                                                rhash_sa));
1516 rollback_sa:
1517         if (err) {
1518                 /* return original hw_sa to hash */
1519                 memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
1520                        sizeof(org_hw_sa));
1521                 WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
1522                                                &fpga_xfrm->sa_ctx->hash,
1523                                                rhash_sa));
1524         }
1525         mutex_unlock(&fipsec->sa_hash_lock);
1526
1527 change_sw_xfrm_attrs:
1528         if (!err)
1529                 memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
1530         mutex_unlock(&fpga_xfrm->lock);
1531         return err;
1532 }