drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"

enum accel_fs_tcp_type {
	ACCEL_FS_IPV4_TCP,
	ACCEL_FS_IPV6_TCP,
	ACCEL_FS_TCP_NUM_TYPES,
};

struct mlx5e_accel_fs_tcp {
	struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES];
	struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
};

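/* Map an accel_fs_tcp table type to the corresponding TTC traffic type,
 * which is used to look up and redirect the matching TTC rule.
 */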
static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
{
	switch (i) {
	case ACCEL_FS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	default: /* ACCEL_FS_IPV6_TCP */
		return MLX5_TT_IPV6_TCP;
	}
}

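/* Build the IPv4 part of the match: TCP over IPv4, with the packet's
 * source address taken from the socket's remote side (inet_daddr) and
 * the destination address from the socket's local side (inet_rcv_saddr),
 * since the rule matches ingress traffic for this connection.
 */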
static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_daddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_rcv_saddr, 4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

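/* IPv6 counterpart of the above: match TCP over IPv6 and the full
 * 128-bit source/destination addresses of the connection, again from
 * the perspective of received packets.
 */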
#if IS_ENABLED(CONFIG_IPV6)
static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &inet6_sk(sk)->saddr, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
}
#endif

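/* Remove a per-socket rule previously installed by mlx5e_accel_fs_add_sk(). */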
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

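/* Install a rule that steers this socket's ingress TCP traffic, matched
 * on the full 5-tuple, to the given TIR. A non-default flow_tag, if
 * provided, is carried in the rule's flow context. Returns the rule
 * handle, or an ERR_PTR() on failure.
 */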
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
					       struct sock *sk, u32 tirn,
					       uint32_t flow_tag)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5e_flow_table *ft = NULL;
	struct mlx5e_accel_fs_tcp *fs_tcp;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	fs_tcp = priv->fs->accel_tcp;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (sk->sk_family) {
	case AF_INET:
		accel_fs_tcp_set_ipv4_flow(spec, sk);
		ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
			  &inet_sk(sk)->inet_rcv_saddr,
			  inet_sk(sk)->inet_sport,
			  &inet_sk(sk)->inet_daddr,
			  inet_sk(sk)->inet_dport);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (!ipv6_only_sock(sk) &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			accel_fs_tcp_set_ipv4_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
		} else {
			accel_fs_tcp_set_ipv6_flow(spec, sk);
			ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP];
		}
		break;
#endif
	default:
		break;
	}

	if (!ft) {
		flow = ERR_PTR(-EINVAL);
		goto out;
	}

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_dport);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.tcp_sport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
		 ntohs(inet_sk(sk)->inet_sport));
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
		 ntohs(inet_sk(sk)->inet_dport));

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) {
		spec->flow_context.flow_tag = flow_tag;
		spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
	}

	flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);

	if (IS_ERR(flow))
		netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
			   PTR_ERR(flow));

out:
	kvfree(spec);
	return flow;
}

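/* Add the catch-all rule of an accel_fs_tcp table: anything that did not
 * match a per-socket rule is forwarded to the TTC table's default
 * destination for the corresponding traffic type.
 */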
static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
					 enum accel_fs_tcp_type type)
{
	struct mlx5e_flow_table *accel_fs_t;
	struct mlx5_flow_destination dest;
	struct mlx5e_accel_fs_tcp *fs_tcp;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err = 0;

	fs_tcp = priv->fs->accel_tcp;
	accel_fs_t = &fs_tcp->tables[type];

	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
	rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev,
			   "%s: add default rule failed, accel_fs type=%d, err %d\n",
			   __func__, type, err);
		return err;
	}

	fs_tcp->default_rules[type] = rule;
	return 0;
}

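/* Each accel_fs_tcp table has two flow groups: group 1 holds the
 * per-connection 5-tuple rules, group 2 holds the single default
 * (miss) rule.
 */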
#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS	(2)
#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE	(BIT(0))
#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE	(MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\
					 MLX5E_ACCEL_FS_TCP_GROUP2_SIZE)
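/* Create the two flow groups described above. Group 1's match criteria
 * cover IP version, IP protocol, TCP ports and the IPv4 or IPv6
 * addresses, depending on the table type; group 2 has no match criteria
 * and therefore catches everything that falls through.
 */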
static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
				      enum accel_fs_tcp_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);

	switch (type) {
	case ACCEL_FS_IPV4_TCP:
	case ACCEL_FS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ACCEL_FS_IPV4_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ACCEL_FS_IPV6_TCP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

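/* Create one accel_fs_tcp flow table: allocate the table itself at the
 * accel FT level of the NIC RX priority, then its flow groups and its
 * default rule. On any failure the table is torn down again.
 */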
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
	struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
	ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
		   ft->t->id, ft->t->level);

	err = accel_fs_tcp_create_groups(ft, type);
	if (err)
		goto err;

	err = accel_fs_tcp_add_default_rule(priv, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

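/* Detach the accel_fs_tcp tables from the steering pipeline by restoring
 * the TTC rules for TCP traffic to their default destinations (the
 * indirection TIRs).
 */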
static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
{
	int err, i;

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		/* Modify ttc rules destination to point back to the indir TIRs */
		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
				   __func__, fs_accel2tt(i), err);
			return err;
		}
	}

	return 0;
}

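/* Point the TTC rules for IPv4/IPv6 TCP at the accel_fs_tcp tables, so
 * TCP traffic is filtered through the per-socket rules before hitting
 * the default rules.
 */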
static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		dest.ft = priv->fs->accel_tcp->tables[i].t;

		/* Modify ttc rules destination to point on the accel_fs FTs */
		err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
				   __func__, fs_accel2tt(i), err);
			return err;
		}
	}
	return 0;
}

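/* Destroy a single accel_fs_tcp table together with its default rule.
 * Safe to call on a table that was never created.
 */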
static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
	struct mlx5e_accel_fs_tcp *fs_tcp;

	fs_tcp = priv->fs->accel_tcp;
	if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
		return;

	mlx5_del_flow_rules(fs_tcp->default_rules[i]);
	mlx5e_destroy_flow_table(&fs_tcp->tables[i]);
	fs_tcp->tables[i].t = NULL;
}

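/* Tear down the accel_fs_tcp infrastructure: restore the TTC defaults,
 * destroy all tables and free the bookkeeping structure. A no-op if it
 * was never created.
 */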
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
	int i;

	if (!priv->fs->accel_tcp)
		return;

	accel_fs_tcp_disable(priv);

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
		accel_fs_tcp_destroy_table(priv, i);

	kfree(priv->fs->accel_tcp);
	priv->fs->accel_tcp = NULL;
}

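/* Create the accel_fs_tcp tables (one per IP version) and hook them into
 * the TTC steering. Requires device support for matching on the outer IP
 * version; returns -EOPNOTSUPP otherwise.
 */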
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
{
	int i, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
		return -EOPNOTSUPP;

	priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
	if (!priv->fs->accel_tcp)
		return -ENOMEM;

	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
		err = accel_fs_tcp_create_table(priv, i);
		if (err)
			goto err_destroy_tables;
	}

	err = accel_fs_tcp_enable(priv);
	if (err)
		goto err_destroy_tables;

	return 0;

err_destroy_tables:
	while (--i >= 0)
		accel_fs_tcp_destroy_table(priv, i);

	kfree(priv->fs->accel_tcp);
	priv->fs->accel_tcp = NULL;
	return err;
}