/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
        struct work_struct      work;
        union {
                struct mlx5_ib_dev            *dev;
                struct mlx5_ib_multiport_info *mpi;
        };
        bool                    is_slave;
        unsigned int            event;
        void                    *param;
};

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
        struct mlx5_ib_dev *dev;

        mutex_lock(&mlx5_ib_multiport_mutex);
        dev = mpi->ibdev;
        mutex_unlock(&mlx5_ib_multiport_mutex);
        return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
                          u8 port_num,
                          enum ib_port_state *state)
{
        struct ib_port_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        ret = ibdev->ops.query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
                                           struct net_device *ndev,
                                           u8 *port_num)
{
        struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
        struct net_device *rep_ndev;
        struct mlx5_ib_port *port;
        int i;

        for (i = 0; i < dev->num_ports; i++) {
                port = &dev->port[i];
                if (!port->rep)
                        continue;

                read_lock(&port->roce.netdev_lock);
                rep_ndev = mlx5_ib_get_rep_netdev(esw,
                                                  port->rep->vport);
                if (rep_ndev == ndev) {
                        read_unlock(&port->roce.netdev_lock);
                        *port_num = i + 1;
                        return &port->roce;
                }
                read_unlock(&port->roce.netdev_lock);
        }

        return NULL;
}

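/*
 * Netdev notifier: track the netdev bound to each RoCE port and turn
 * carrier/state changes on that netdev (or on its bonding master when
 * LAG is active) into IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR events.
 */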
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        u8 port_num = roce->native_port_num;
        struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *ibdev;

        ibdev = roce->dev;
        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                /* Should already be registered during the load */
                if (ibdev->is_rep)
                        break;
                write_lock(&roce->netdev_lock);
                if (ndev->dev.parent == mdev->device)
                        roce->netdev = ndev;
                write_unlock(&roce->netdev_lock);
                break;

        case NETDEV_UNREGISTER:
                /* In case of reps, ib device goes away before the netdevs */
                write_lock(&roce->netdev_lock);
                if (roce->netdev == ndev)
                        roce->netdev = NULL;
                write_unlock(&roce->netdev_lock);
                break;

        case NETDEV_CHANGE:
        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
                struct net_device *upper = NULL;

                if (lag_ndev) {
                        upper = netdev_master_upper_dev_get(lag_ndev);
                        dev_put(lag_ndev);
                }

                if (ibdev->is_rep)
                        roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
                if (!roce)
                        return NOTIFY_DONE;
                if ((upper == ndev || (!upper && ndev == roce->netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = { };
                        enum ib_port_state port_state;

                        if (get_port_state(&ibdev->ib_dev, port_num,
                                           &port_state))
                                goto done;

                        if (roce->last_port_state == port_state)
                                goto done;

                        roce->last_port_state = port_state;
                        ibev.device = &ibdev->ib_dev;
                        if (port_state == IB_PORT_DOWN)
                                ibev.event = IB_EVENT_PORT_ERR;
                        else if (port_state == IB_PORT_ACTIVE)
                                ibev.event = IB_EVENT_PORT_ACTIVE;
                        else
                                goto done;

                        ibev.element.port_num = port_num;
                        ib_dispatch_event(&ibev);
                }
                break;
        }

        default:
                break;
        }
done:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return NOTIFY_DONE;
}

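/*
 * Return the netdev backing an IB port, preferring the LAG netdev when
 * bonding is active. The returned netdev is reference-counted; the
 * caller must release it with dev_put().
 */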
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;
        struct mlx5_core_dev *mdev;

        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NULL;

        ndev = mlx5_lag_get_roce_netdev(mdev);
        if (ndev)
                goto out;

        /* Ensure ndev does not disappear before we invoke dev_hold()
         */
        read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
        ndev = ibdev->port[port_num - 1].roce.netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return ndev;
}

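/*
 * Resolve an IB port number to the mlx5_core_dev that actually owns it.
 * For multiport RoCE the port may live on an affiliated slave device;
 * in that case a reference is taken which the caller must drop via
 * mlx5_ib_put_native_port_mdev(). Returns NULL while the port is
 * unaffiliated or mid-unaffiliation.
 */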
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
                                                   u8 ib_port_num,
                                                   u8 *native_port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          ib_port_num);
        struct mlx5_core_dev *mdev = NULL;
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) ||
            ll != IB_LINK_LAYER_ETHERNET) {
                if (native_port_num)
                        *native_port_num = ib_port_num;
                return ibdev->mdev;
        }

        if (native_port_num)
                *native_port_num = 1;

        port = &ibdev->port[ib_port_num - 1];
        if (!port)
                return NULL;

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[ib_port_num - 1].mp.mpi;
        if (mpi && !mpi->unaffiliate) {
                mdev = mpi->mdev;
                /* If it's the master no need to refcount, it'll exist
                 * as long as the ib_dev exists.
                 */
                if (!mpi->is_master)
                        mpi->mdev_refcnt++;
        }
        spin_unlock(&port->mp.mpi_lock);

        return mdev;
}

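/*
 * Drop the reference taken by mlx5_ib_get_native_port_mdev(); if an
 * unaffiliation is in progress, signal the thread waiting for
 * references to drain.
 */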
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          port_num);
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
                return;

        port = &ibdev->port[port_num - 1];

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[port_num - 1].mp.mpi;
        if (mpi->is_master)
                goto out;

        mpi->mdev_refcnt--;
        if (mpi->unaffiliate)
                complete(&mpi->unref_comp);
out:
        spin_unlock(&port->mp.mpi_lock);
}

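/*
 * Map the firmware's PTYS eth_proto_oper bit to the nearest IB
 * speed/width pair, e.g. 100GBASE-CR4 is reported as 4x EDR
 * (4 lanes of 25 Gb/s).
 */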
static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                           u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
        case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                        u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_DDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_HDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                    u8 *active_width, bool ext)
{
        return ext ?
                translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
                                             active_width) :
                translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
                                                active_width);
}

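/*
 * Build ib_port_attr for a RoCE port: speed and width come from the
 * PTYS register, while state and active MTU are derived from the
 * associated netdev (or its LAG master).
 */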
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
        struct mlx5_core_dev *mdev;
        struct net_device *ndev, *upper;
        enum ib_mtu ndev_ib_mtu;
        bool put_mdev = true;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;
        u8 mdev_port_num;
        bool ext;
        int err;

        mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
        if (!mdev) {
                /* This means the port isn't affiliated yet. Get the
                 * info for the master port instead.
                 */
                put_mdev = false;
                mdev = dev->mdev;
                mdev_port_num = 1;
                port_num = 1;
        }

        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
         * Use native port in case of reps
         */
        if (dev->is_rep)
                err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
                                           1);
        else
                err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
                                           mdev_port_num);
        if (err)
                goto out;
        ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
        eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

        props->active_width     = IB_WIDTH_4X;
        props->active_speed     = IB_SPEED_QDR;

        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width, ext);

        props->port_cap_flags |= IB_PORT_CM_SUP;
        props->ip_gids = true;

        props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
                                                roce_address_table_size);
        props->max_mtu          = IB_MTU_4096;
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len     = 1;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;

        mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        /* If this is a stub query for an unaffiliated port stop here */
        if (!put_mdev)
                goto out;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                goto out;

        if (dev->lag_active) {
                rcu_read_lock();
                upper = netdev_master_upper_dev_get_rcu(ndev);
                if (upper) {
                        dev_put(ndev);
                        ndev = upper;
                        dev_hold(ndev);
                }
                rcu_read_unlock();
        }

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
                props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu       = min(props->max_mtu, ndev_ib_mtu);
out:
        if (put_mdev)
                mlx5_ib_put_native_port_mdev(dev, port_num);
        return err;
}

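/*
 * Program one GID table entry in hardware; a NULL gid clears the entry.
 * The RoCE version and L3 type handed to firmware are derived from the
 * GID attributes.
 */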
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
                         unsigned int index, const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        u16 vlan_id = 0xffff;
        u8 roce_version = 0;
        u8 roce_l3_type = 0;
        u8 mac[ETH_ALEN];
        int ret;

        if (gid) {
                gid_type = attr->gid_type;
                ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
                if (ret)
                        return ret;
        }

        switch (gid_type) {
        case IB_GID_TYPE_IB:
                roce_version = MLX5_ROCE_VERSION_1;
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                roce_version = MLX5_ROCE_VERSION_2;
                if (ipv6_addr_v4mapped((void *)gid))
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
                else
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
                break;

        default:
                mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
        }

        return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
                                      roce_l3_type, gid->raw, mac,
                                      vlan_id < VLAN_CFI_MASK, vlan_id,
                                      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
                               const struct ib_gid_attr *attr)
{
        if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
                return !MLX5_CAP_GEN(dev->mdev, ib_virt);
        return 0;
}

enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

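/*
 * Advertise IB_ATOMIC_HCA only if the device implements 8-byte
 * compare-and-swap and fetch-and-add and can respond in host
 * endianness; otherwise advertise no atomic support.
 */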
static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            u8 atomic_size_qp,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

        /* Check if HW supports 8-byte standard atomic operations and can
         * respond in host endianness
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
                               struct ib_device_attr *props)
{
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

        get_atomic_caps(dev, atomic_size_qp, props);
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8      desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

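/*
 * The main device-capability query: translate firmware capability bits
 * into ib_device_attr, and for userspace callers append the
 * mlx5-specific response fields (TSO, RSS, CQE compression, packet
 * pacing, striding RQ, tunnel offloads) that fit within uhw->outlen.
 */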
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_sq_desc;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
        bool raw_support = !mlx5_core_mp_enabled(mdev);
        struct mlx5_ib_query_device_resp resp = {};
        size_t resp_len;
        u64 max_tso;

        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;

        resp.response_length = resp_len;

        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
                if (MLX5_CAP_ETH(mdev, csum_cap)) {
                        /* Legacy bit to support old userspace libraries */
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
                }

                if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
                        props->raw_packet_caps |=
                                IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

                if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
                        max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
                        if (max_tso) {
                                resp.tso_caps.max_tso = 1 << max_tso;
                                resp.tso_caps.supported_qpts |=
                                        1 << IB_QPT_RAW_PACKET;
                                resp.response_length += sizeof(resp.tso_caps);
                        }
                }

                if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
                        resp.rss_caps.rx_hash_function =
                                                MLX5_RX_HASH_FUNC_TOEPLITZ;
                        resp.rss_caps.rx_hash_fields_mask =
                                                MLX5_RX_HASH_SRC_IPV4 |
                                                MLX5_RX_HASH_DST_IPV4 |
                                                MLX5_RX_HASH_SRC_IPV6 |
                                                MLX5_RX_HASH_DST_IPV6 |
                                                MLX5_RX_HASH_SRC_PORT_TCP |
                                                MLX5_RX_HASH_DST_PORT_TCP |
                                                MLX5_RX_HASH_SRC_PORT_UDP |
                                                MLX5_RX_HASH_DST_PORT_UDP |
                                                MLX5_RX_HASH_INNER;
                        if (mlx5_accel_ipsec_device_caps(dev->mdev) &
                            MLX5_ACCEL_IPSEC_CAP_DEVICE)
                                resp.rss_caps.rx_hash_fields_mask |=
                                        MLX5_RX_HASH_IPSEC_SPI;
                        resp.response_length += sizeof(resp.rss_caps);
                }
        } else {
                if (field_avail(typeof(resp), tso_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.tso_caps);
                if (field_avail(typeof(resp), rss_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.rss_caps);
        }

        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
            MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
            raw_support)
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
            MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
            raw_support) {
                /* Legacy bit to support old userspace libraries */
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        if (MLX5_CAP_DEV_MEM(mdev, memic)) {
                props->max_dm_size =
                        MLX5_CAP_DEV_MEM(mdev, max_memic_size);
        }

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        if (MLX5_CAP_GEN(mdev, end_pad))
                props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = ~(min_page_size - 1);
        props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                     sizeof(struct mlx5_wqe_data_seg);
        max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
        max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
                     sizeof(struct mlx5_wqe_raddr_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_send_sge = max_sq_sg;
        props->max_recv_sge = max_rq_sg;
        props->max_sge_rd          = MLX5_MAX_SGE_RD;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        props->max_pi_fast_reg_page_list_len =
                props->max_fast_reg_page_list_len / 2;
        props->max_sgl_rd =
                MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
        get_atomic_caps_qp(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->max_ah = INT_MAX;
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
                        props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
                props->odp_caps = dev->odp_caps;
        }

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (mlx5_core_is_vf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET && raw_support) {
                props->rss_caps.max_rwq_indirection_tables =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
                props->rss_caps.max_rwq_indirection_table_size =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
                props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                props->max_wq_type_rq =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }

        if (MLX5_CAP_GEN(mdev, tag_matching)) {
                props->tm_caps.max_num_tags =
                        (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
                props->tm_caps.max_ops =
                        1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
        }

        if (MLX5_CAP_GEN(mdev, tag_matching) &&
            MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
                props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
                props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
        }

        if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
                props->cq_caps.max_cq_moderation_count =
                                                MLX5_MAX_CQ_COUNT;
                props->cq_caps.max_cq_moderation_period =
                                                MLX5_MAX_CQ_PERIOD;
        }

        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
                resp.response_length += sizeof(resp.cqe_comp_caps);

                if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
                        resp.cqe_comp_caps.max_num =
                                MLX5_CAP_GEN(dev->mdev,
                                             cqe_compression_max_num);

                        resp.cqe_comp_caps.supported_format =
                                MLX5_IB_CQE_RES_FORMAT_HASH |
                                MLX5_IB_CQE_RES_FORMAT_CSUM;

                        if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
                                resp.cqe_comp_caps.supported_format |=
                                        MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
                }
        }

        if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
            raw_support) {
                if (MLX5_CAP_QOS(mdev, packet_pacing) &&
                    MLX5_CAP_GEN(mdev, qos)) {
                        resp.packet_pacing_caps.qp_rate_limit_max =
                                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
                        resp.packet_pacing_caps.qp_rate_limit_min =
                                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
                        resp.packet_pacing_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                        if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
                            MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
                                resp.packet_pacing_caps.cap_flags |=
                                        MLX5_IB_PP_SUPPORT_BURST;
                }
                resp.response_length += sizeof(resp.packet_pacing_caps);
        }

        if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
                        uhw->outlen)) {
                if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes =
                                MLX5_IB_ALLOW_MPW;

                if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes |=
                                MLX5_IB_SUPPORT_EMPW;

                resp.response_length +=
                        sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
        }

        if (field_avail(typeof(resp), flags, uhw->outlen)) {
                resp.response_length += sizeof(resp.flags);

                if (MLX5_CAP_GEN(mdev, cqe_compression_128))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

                if (MLX5_CAP_GEN(mdev, cqe_128_always))
                        resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
                if (MLX5_CAP_GEN(mdev, qp_packet_based))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

                resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
        }

        if (field_avail(typeof(resp), sw_parsing_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.sw_parsing_caps);
                if (MLX5_CAP_ETH(mdev, swp)) {
                        resp.sw_parsing_caps.sw_parsing_offloads |=
                                MLX5_IB_SW_PARSING;

                        if (MLX5_CAP_ETH(mdev, swp_csum))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_CSUM;

                        if (MLX5_CAP_ETH(mdev, swp_lso))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_LSO;

                        if (resp.sw_parsing_caps.sw_parsing_offloads)
                                resp.sw_parsing_caps.supported_qpts =
                                        BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
            raw_support) {
                resp.response_length += sizeof(resp.striding_rq_caps);
                if (MLX5_CAP_GEN(mdev, striding_rq)) {
                        resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
                                MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
                        if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
                                resp.striding_rq_caps
                                        .min_single_wqe_log_num_of_strides =
                                        MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                        else
                                resp.striding_rq_caps
                                        .min_single_wqe_log_num_of_strides =
                                        MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
                                MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.supported_qpts =
                                BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), tunnel_offloads_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.tunnel_offloads_caps);
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_GRE)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_UDP)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);

                if (err)
                        return err;
        }

        return 0;
}

enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

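/*
 * Convert the firmware link-width bitmask to the ib_verbs width enum,
 * defaulting to 4x on an unrecognized value.
 */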
static void translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        if (active_width & MLX5_IB_WIDTH_1X)
                *ib_width = IB_WIDTH_1X;
        else if (active_width & MLX5_IB_WIDTH_2X)
                *ib_width = IB_WIDTH_2X;
        else if (active_width & MLX5_IB_WIDTH_4X)
                *ib_width = IB_WIDTH_4X;
        else if (active_width & MLX5_IB_WIDTH_8X)
                *ib_width = IB_WIDTH_8X;
        else if (active_width & MLX5_IB_WIDTH_12X)
                *ib_width = IB_WIDTH_12X;
        else {
                mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
                            (int)active_width);
                *ib_width = IB_WIDTH_4X;
        }
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}

enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0    = 1,
        MLX5_VL_HW_0_1  = 2,
        MLX5_VL_HW_0_2  = 3,
        MLX5_VL_HW_0_3  = 4,
        MLX5_VL_HW_0_4  = 5,
        MLX5_VL_HW_0_5  = 6,
        MLX5_VL_HW_0_6  = 7,
        MLX5_VL_HW_0_7  = 8,
        MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

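/*
 * Query port attributes for an InfiniBand link layer port via the HCA
 * vport context, translating firmware encodings of width, speed, MTU
 * and VL capabilities into their ib_verbs counterparts.
 */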
1292 static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
1293                                struct ib_port_attr *props)
1294 {
1295         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1296         struct mlx5_core_dev *mdev = dev->mdev;
1297         struct mlx5_hca_vport_context *rep;
1298         u16 max_mtu;
1299         u16 oper_mtu;
1300         int err;
1301         u8 ib_link_width_oper;
1302         u8 vl_hw_cap;
1303
1304         rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1305         if (!rep) {
1306                 err = -ENOMEM;
1307                 goto out;
1308         }
1309
1310         /* props being zeroed by the caller, avoid zeroing it here */
1311
1312         err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1313         if (err)
1314                 goto out;
1315
1316         props->lid              = rep->lid;
1317         props->lmc              = rep->lmc;
1318         props->sm_lid           = rep->sm_lid;
1319         props->sm_sl            = rep->sm_sl;
1320         props->state            = rep->vport_state;
1321         props->phys_state       = rep->port_physical_state;
1322         props->port_cap_flags   = rep->cap_mask1;
1323         props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1324         props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1325         props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1326         props->bad_pkey_cntr    = rep->pkey_violation_counter;
1327         props->qkey_viol_cntr   = rep->qkey_violation_counter;
1328         props->subnet_timeout   = rep->subnet_timeout;
1329         props->init_type_reply  = rep->init_type_reply;
1330
1331         if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1332                 props->port_cap_flags2 = rep->cap_mask2;
1333
1334         err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
1335         if (err)
1336                 goto out;
1337
1338         translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1339
1340         err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
1341         if (err)
1342                 goto out;
1343
1344         mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1345
1346         props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1347
1348         mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1349
1350         props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1351
1352         err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1353         if (err)
1354                 goto out;
1355
1356         err = translate_max_vl_num(ibdev, vl_hw_cap,
1357                                    &props->max_vl_num);
1358 out:
1359         kfree(rep);
1360         return err;
1361 }
1362
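/*
 * Dispatch the port query by vport access method (MAD, HCA vport
 * commands, or NIC/RoCE), then subtract the GIDs the core device
 * reserves for internal use from gid_tbl_len. For a not-yet-affiliated
 * multiport slave the master's mdev is queried instead; both reserve
 * the same number of GIDs.
 */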
1363 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1364                        struct ib_port_attr *props)
1365 {
1366         unsigned int count;
1367         int ret;
1368
1369         switch (mlx5_get_vport_access_method(ibdev)) {
1370         case MLX5_VPORT_ACCESS_METHOD_MAD:
1371                 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1372                 break;
1373
1374         case MLX5_VPORT_ACCESS_METHOD_HCA:
1375                 ret = mlx5_query_hca_port(ibdev, port, props);
1376                 break;
1377
1378         case MLX5_VPORT_ACCESS_METHOD_NIC:
1379                 ret = mlx5_query_port_roce(ibdev, port, props);
1380                 break;
1381
1382         default:
1383                 ret = -EINVAL;
1384         }
1385
1386         if (!ret && props) {
1387                 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1388                 struct mlx5_core_dev *mdev;
1389                 bool put_mdev = true;
1390
1391                 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1392                 if (!mdev) {
1393                         /* If the port isn't affiliated yet, query the master.
1394                          * The master and slave will have the same values.
1395                          */
1396                         mdev = dev->mdev;
1397                         port = 1;
1398                         put_mdev = false;
1399                 }
1400                 count = mlx5_core_reserved_gids_count(mdev);
1401                 if (put_mdev)
1402                         mlx5_ib_put_native_port_mdev(dev, port);
1403                 props->gid_tbl_len -= count;
1404         }
1405         return ret;
1406 }
1407
1408 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1409                                   struct ib_port_attr *props)
1410 {
1411         int ret;
1412
1413         /* Only link layer == Ethernet is valid for representors,
1414          * and we always use port 1.
1415          */
1416         ret = mlx5_query_port_roce(ibdev, port, props);
1417         if (ret || !props)
1418                 return ret;
1419
1420         /* We don't support GIDs */
1421         props->gid_tbl_len = 0;
1422
1423         return ret;
1424 }
1425
1426 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1427                              union ib_gid *gid)
1428 {
1429         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1430         struct mlx5_core_dev *mdev = dev->mdev;
1431
1432         switch (mlx5_get_vport_access_method(ibdev)) {
1433         case MLX5_VPORT_ACCESS_METHOD_MAD:
1434                 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1435
1436         case MLX5_VPORT_ACCESS_METHOD_HCA:
1437                 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1438
1439         default:
1440                 return -EINVAL;
1441         }
1442
1443 }
1444
1445 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1446                                    u16 index, u16 *pkey)
1447 {
1448         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1449         struct mlx5_core_dev *mdev;
1450         bool put_mdev = true;
1451         u8 mdev_port_num;
1452         int err;
1453
1454         mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1455         if (!mdev) {
1456                 /* The port isn't affiliated yet; get the PKey from the master
1457                  * port. For RoCE, the PKey tables will be the same.
1458                  */
1459                 put_mdev = false;
1460                 mdev = dev->mdev;
1461                 mdev_port_num = 1;
1462         }
1463
1464         err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1465                                         index, pkey);
1466         if (put_mdev)
1467                 mlx5_ib_put_native_port_mdev(dev, port);
1468
1469         return err;
1470 }
1471
1472 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1473                               u16 *pkey)
1474 {
1475         switch (mlx5_get_vport_access_method(ibdev)) {
1476         case MLX5_VPORT_ACCESS_METHOD_MAD:
1477                 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1478
1479         case MLX5_VPORT_ACCESS_METHOD_HCA:
1480         case MLX5_VPORT_ACCESS_METHOD_NIC:
1481                 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1482         default:
1483                 return -EINVAL;
1484         }
1485 }
1486
1487 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1488                                  struct ib_device_modify *props)
1489 {
1490         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1491         struct mlx5_reg_node_desc in;
1492         struct mlx5_reg_node_desc out;
1493         int err;
1494
1495         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1496                 return -EOPNOTSUPP;
1497
1498         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1499                 return 0;
1500
1501         /*
1502          * If possible, pass the node desc to FW so it can generate a trap
1503          * 144 (node description changed). If the cmd fails, just ignore it.
1504          */
1505         memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1506         err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1507                                    sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1508         if (err)
1509                 return err;
1510
1511         memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1512
1513         return err;
1514 }
1515
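/*
 * Read-modify-write of the port capability mask through the HCA vport
 * context. The current cap_mask1_perm is queried first so a request
 * that touches unsupported bits can be rejected before anything is
 * modified.
 */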
1516 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1517                                 u32 value)
1518 {
1519         struct mlx5_hca_vport_context ctx = {};
1520         struct mlx5_core_dev *mdev;
1521         u8 mdev_port_num;
1522         int err;
1523
1524         mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1525         if (!mdev)
1526                 return -ENODEV;
1527
1528         err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1529         if (err)
1530                 goto out;
1531
1532         if (~ctx.cap_mask1_perm & mask) {
1533                 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1534                              mask, ctx.cap_mask1_perm);
1535                 err = -EINVAL;
1536                 goto out;
1537         }
1538
1539         ctx.cap_mask1 = value;
1540         ctx.cap_mask1_perm = mask;
1541         err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1542                                                  0, &ctx);
1543
1544 out:
1545         mlx5_ib_put_native_port_mdev(dev, port_num);
1546
1547         return err;
1548 }
1549
1550 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1551                                struct ib_port_modify *props)
1552 {
1553         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1554         struct ib_port_attr attr;
1555         u32 tmp;
1556         int err;
1557         u32 change_mask;
1558         u32 value;
1559         bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1560                       IB_LINK_LAYER_INFINIBAND);
1561
1562         /* The CM layer calls ib_modify_port() regardless of the link layer. For
1563          * Ethernet ports, qkey violation and port capabilities are meaningless.
1564          */
1565         if (!is_ib)
1566                 return 0;
1567
1568         if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1569                 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1570                 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1571                 return set_port_caps_atomic(dev, port, change_mask, value);
1572         }
1573
1574         mutex_lock(&dev->cap_mask_mutex);
1575
1576         err = ib_query_port(ibdev, port, &attr);
1577         if (err)
1578                 goto out;
1579
1580         tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1581                 ~props->clr_port_cap_mask;
1582
1583         err = mlx5_set_port_caps(dev->mdev, port, tmp);
1584
1585 out:
1586         mutex_unlock(&dev->cap_mask_mutex);
1587         return err;
1588 }
1589
1590 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1591 {
1592         mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1593                     caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1594 }
1595
1596 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1597 {
1598         /* A system page larger than 4K without 4K UAR support might limit the dynamic size */
1599         if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1600                 return MLX5_MIN_DYN_BFREGS;
1601
1602         return MLX5_MAX_DYN_BFREGS;
1603 }
1604
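/*
 * The user's bfreg request is rounded up to whole system pages and the
 * dynamic bfregs are appended on top; req->total_num_bfregs is updated
 * in place so the ucontext response reflects what was actually
 * provisioned.
 */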
1605 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1606                              struct mlx5_ib_alloc_ucontext_req_v2 *req,
1607                              struct mlx5_bfreg_info *bfregi)
1608 {
1609         int uars_per_sys_page;
1610         int bfregs_per_sys_page;
1611         int ref_bfregs = req->total_num_bfregs;
1612
1613         if (req->total_num_bfregs == 0)
1614                 return -EINVAL;
1615
1616         BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1617         BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1618
1619         if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1620                 return -ENOMEM;
1621
1622         uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1623         bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1624         /* This holds the required static allocation asked by the user */
1625         req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1626         if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1627                 return -EINVAL;
1628
1629         bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1630         bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1631         bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1632         bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1633
1634         mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1635                     MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1636                     lib_uar_4k ? "yes" : "no", ref_bfregs,
1637                     req->total_num_bfregs, bfregi->total_num_bfregs,
1638                     bfregi->num_sys_pages);
1639
1640         return 0;
1641 }
1642
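/*
 * Firmware UARs are allocated here only for the static system pages;
 * dynamic pages are marked MLX5_IB_INVALID_UAR_INDEX and get their UAR
 * lazily, when userspace mmaps them (see uar_mmap() below).
 */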
1643 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1644 {
1645         struct mlx5_bfreg_info *bfregi;
1646         int err;
1647         int i;
1648
1649         bfregi = &context->bfregi;
1650         for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1651                 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1652                 if (err)
1653                         goto error;
1654
1655                 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1656         }
1657
1658         for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1659                 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1660
1661         return 0;
1662
1663 error:
1664         for (--i; i >= 0; i--)
1665                 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1666                         mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1667
1668         return err;
1669 }
1670
1671 static void deallocate_uars(struct mlx5_ib_dev *dev,
1672                             struct mlx5_ib_ucontext *context)
1673 {
1674         struct mlx5_bfreg_info *bfregi;
1675         int i;
1676
1677         bfregi = &context->bfregi;
1678         for (i = 0; i < bfregi->num_sys_pages; i++)
1679                 if (i < bfregi->num_static_sys_pages ||
1680                     bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1681                         mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1682 }
1683
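/*
 * Self-loopback bookkeeping: loopback is turned on once a second user
 * transport domain exists (user_td reaches 2) or any raw Ethernet QP is
 * created (qps reaches 1), and the mirror-image disable path below
 * turns it off again when the counts drop back. Both counters are
 * protected by dev->lb.mutex.
 */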
1684 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1685 {
1686         int err = 0;
1687
1688         mutex_lock(&dev->lb.mutex);
1689         if (td)
1690                 dev->lb.user_td++;
1691         if (qp)
1692                 dev->lb.qps++;
1693
1694         if (dev->lb.user_td == 2 ||
1695             dev->lb.qps == 1) {
1696                 if (!dev->lb.enabled) {
1697                         err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1698                         dev->lb.enabled = true;
1699                 }
1700         }
1701
1702         mutex_unlock(&dev->lb.mutex);
1703
1704         return err;
1705 }
1706
1707 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1708 {
1709         mutex_lock(&dev->lb.mutex);
1710         if (td)
1711                 dev->lb.user_td--;
1712         if (qp)
1713                 dev->lb.qps--;
1714
1715         if (dev->lb.user_td == 1 &&
1716             dev->lb.qps == 0) {
1717                 if (dev->lb.enabled) {
1718                         mlx5_nic_vport_update_local_lb(dev->mdev, false);
1719                         dev->lb.enabled = false;
1720                 }
1721         }
1722
1723         mutex_unlock(&dev->lb.mutex);
1724 }
1725
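/*
 * Transport domain allocation also takes a loopback reference: on
 * Ethernet ports where the firmware can disable local loopback (uc or
 * mc), each user TD calls mlx5_ib_enable_lb(). If the device exposes no
 * transport domains at all, the call succeeds without allocating one.
 */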
1726 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1727                                           u16 uid)
1728 {
1729         int err;
1730
1731         if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1732                 return 0;
1733
1734         err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1735         if (err)
1736                 return err;
1737
1738         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1739             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1740              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1741                 return err;
1742
1743         return mlx5_ib_enable_lb(dev, true, false);
1744 }
1745
1746 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1747                                              u16 uid)
1748 {
1749         if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1750                 return;
1751
1752         mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1753
1754         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1755             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1756              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1757                 return;
1758
1759         mlx5_ib_disable_lb(dev, true, false);
1760 }
1761
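/*
 * ucontext allocation negotiates a versioned ABI with userspace: the
 * request version is deduced from udata->inlen, and each optional
 * response field is emitted only if the caller left room for it
 * (field_avail() against udata->outlen), growing resp.response_length
 * as it goes.
 */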
1762 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1763                                   struct ib_udata *udata)
1764 {
1765         struct ib_device *ibdev = uctx->device;
1766         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1767         struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1768         struct mlx5_ib_alloc_ucontext_resp resp = {};
1769         struct mlx5_core_dev *mdev = dev->mdev;
1770         struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1771         struct mlx5_bfreg_info *bfregi;
1772         int ver;
1773         int err;
1774         size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1775                                      max_cqe_version);
1776         u32 dump_fill_mkey;
1777         bool lib_uar_4k;
1778
1779         if (!dev->ib_active)
1780                 return -EAGAIN;
1781
1782         if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1783                 ver = 0;
1784         else if (udata->inlen >= min_req_v2)
1785                 ver = 2;
1786         else
1787                 return -EINVAL;
1788
1789         err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1790         if (err)
1791                 return err;
1792
1793         if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1794                 return -EOPNOTSUPP;
1795
1796         if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1797                 return -EOPNOTSUPP;
1798
1799         req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1800                                     MLX5_NON_FP_BFREGS_PER_UAR);
1801         if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1802                 return -EINVAL;
1803
1804         resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1805         if (dev->wc_support)
1806                 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1807         resp.cache_line_size = cache_line_size();
1808         resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1809         resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1810         resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1811         resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1812         resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1813         resp.cqe_version = min_t(__u8,
1814                                  (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1815                                  req.max_cqe_version);
1816         resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1817                                 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1818         resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1819                                         MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1820         resp.response_length = min(offsetof(typeof(resp), response_length) +
1821                                    sizeof(resp.response_length), udata->outlen);
1822
1823         if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1824                 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1825                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1826                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1827                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1828                 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1829                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1830                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1831                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1832                 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1833         }
1834
1835         lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1836         bfregi = &context->bfregi;
1837
1838         /* updates req->total_num_bfregs */
1839         err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1840         if (err)
1841                 goto out_ctx;
1842
1843         mutex_init(&bfregi->lock);
1844         bfregi->lib_uar_4k = lib_uar_4k;
1845         bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1846                                 GFP_KERNEL);
1847         if (!bfregi->count) {
1848                 err = -ENOMEM;
1849                 goto out_ctx;
1850         }
1851
1852         bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1853                                     sizeof(*bfregi->sys_pages),
1854                                     GFP_KERNEL);
1855         if (!bfregi->sys_pages) {
1856                 err = -ENOMEM;
1857                 goto out_count;
1858         }
1859
1860         err = allocate_uars(dev, context);
1861         if (err)
1862                 goto out_sys_pages;
1863
1864         if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1865                 err = mlx5_ib_devx_create(dev, true);
1866                 if (err < 0)
1867                         goto out_uars;
1868                 context->devx_uid = err;
1869         }
1870
1871         err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1872                                              context->devx_uid);
1873         if (err)
1874                 goto out_devx;
1875
1876         if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1877                 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1878                 if (err)
1879                         goto out_mdev;
1880         }
1881
1882         INIT_LIST_HEAD(&context->db_page_list);
1883         mutex_init(&context->db_page_mutex);
1884
1885         resp.tot_bfregs = req.total_num_bfregs;
1886         resp.num_ports = dev->num_ports;
1887
1888         if (field_avail(typeof(resp), cqe_version, udata->outlen))
1889                 resp.response_length += sizeof(resp.cqe_version);
1890
1891         if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1892                 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1893                                       MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1894                 resp.response_length += sizeof(resp.cmds_supp_uhw);
1895         }
1896
1897         if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1898                 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1899                         mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1900                         resp.eth_min_inline++;
1901                 }
1902                 resp.response_length += sizeof(resp.eth_min_inline);
1903         }
1904
1905         if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1906                 if (mdev->clock_info)
1907                         resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1908                 resp.response_length += sizeof(resp.clock_info_versions);
1909         }
1910
1911         /*
1912          * We don't want to expose information from the PCI BAR that is located
1913          * after 4096 bytes, so if the arch only supports larger pages, let's
1914          * pretend we don't support reading the HCA's core clock. This is also
1915          * enforced by the mmap function.
1916          */
1917         if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1918                 if (PAGE_SIZE <= 4096) {
1919                         resp.comp_mask |=
1920                                 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1921                         resp.hca_core_clock_offset =
1922                                 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1923                 }
1924                 resp.response_length += sizeof(resp.hca_core_clock_offset);
1925         }
1926
1927         if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1928                 resp.response_length += sizeof(resp.log_uar_size);
1929
1930         if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1931                 resp.response_length += sizeof(resp.num_uars_per_page);
1932
1933         if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1934                 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1935                 resp.response_length += sizeof(resp.num_dyn_bfregs);
1936         }
1937
1938         if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1939                 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1940                         resp.dump_fill_mkey = dump_fill_mkey;
1941                         resp.comp_mask |=
1942                                 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1943                 }
1944                 resp.response_length += sizeof(resp.dump_fill_mkey);
1945         }
1946
1947         err = ib_copy_to_udata(udata, &resp, resp.response_length);
1948         if (err)
1949                 goto out_mdev;
1950
1951         bfregi->ver = ver;
1952         bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1953         context->cqe_version = resp.cqe_version;
1954         context->lib_caps = req.lib_caps;
1955         print_lib_caps(dev, context->lib_caps);
1956
1957         if (dev->lag_active) {
1958                 u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1959
1960                 atomic_set(&context->tx_port_affinity,
1961                            atomic_add_return(
1962                                    1, &dev->port[port].roce.tx_port_affinity));
1963         }
1964
1965         return 0;
1966
1967 out_mdev:
1968         mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1969 out_devx:
1970         if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1971                 mlx5_ib_devx_destroy(dev, context->devx_uid);
1972
1973 out_uars:
1974         deallocate_uars(dev, context);
1975
1976 out_sys_pages:
1977         kfree(bfregi->sys_pages);
1978
1979 out_count:
1980         kfree(bfregi->count);
1981
1982 out_ctx:
1983         return err;
1984 }
1985
1986 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1987 {
1988         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1989         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1990         struct mlx5_bfreg_info *bfregi;
1991
1992         bfregi = &context->bfregi;
1993         mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1994
1995         if (context->devx_uid)
1996                 mlx5_ib_devx_destroy(dev, context->devx_uid);
1997
1998         deallocate_uars(dev, context);
1999         kfree(bfregi->sys_pages);
2000         kfree(bfregi->count);
2001 }
2002
2003 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2004                                  int uar_idx)
2005 {
2006         int fw_uars_per_page;
2007
2008         fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2009
2010         return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2011 }
2012
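/*
 * The mmap offset (vm_pgoff) encodes a command above the low
 * MLX5_IB_MMAP_CMD_SHIFT argument bits; the extended form borrows a
 * further byte (bits 16-23 of the offset) so indices can exceed 255.
 */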
2013 static int get_command(unsigned long offset)
2014 {
2015         return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2016 }
2017
2018 static int get_arg(unsigned long offset)
2019 {
2020         return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2021 }
2022
2023 static int get_index(unsigned long offset)
2024 {
2025         return get_arg(offset);
2026 }
2027
2028 /* The index spills into an extra byte to allow values larger than 255 */
2029 static int get_extended_index(unsigned long offset)
2030 {
2031         return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2032 }
2033
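/*
 * Nothing to do here: all mmaps go through rdma_user_mmap_io(), which
 * zaps them on disassociation. The op appears to be required anyway for
 * uverbs to permit disassociating the ucontext.
 */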
2035 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2036 {
2037 }
2038
2039 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2040 {
2041         switch (cmd) {
2042         case MLX5_IB_MMAP_WC_PAGE:
2043                 return "WC";
2044         case MLX5_IB_MMAP_REGULAR_PAGE:
2045                 return "best effort WC";
2046         case MLX5_IB_MMAP_NC_PAGE:
2047                 return "NC";
2048         case MLX5_IB_MMAP_DEVICE_MEM:
2049                 return "Device Memory";
2050         default:
2051                 return NULL;
2052         }
2053 }
2054
2055 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2056                                         struct vm_area_struct *vma,
2057                                         struct mlx5_ib_ucontext *context)
2058 {
2059         if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2060             !(vma->vm_flags & VM_SHARED))
2061                 return -EINVAL;
2062
2063         if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2064                 return -EOPNOTSUPP;
2065
2066         if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2067                 return -EPERM;
2068         vma->vm_flags &= ~VM_MAYWRITE;
2069
2070         if (!dev->mdev->clock_info)
2071                 return -EOPNOTSUPP;
2072
2073         return vm_insert_page(vma, vma->vm_start,
2074                               virt_to_page(dev->mdev->clock_info));
2075 }
2076
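/*
 * Map one UAR page into userspace. MLX5_IB_MMAP_ALLOC_WC allocates a
 * dynamic UAR on demand (accounted in bfregi->count under bfregi->lock),
 * while the other commands map a UAR that allocate_uars() already set
 * up; the command also decides between write-combining and non-cached
 * page protection.
 */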
2077 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2078                     struct vm_area_struct *vma,
2079                     struct mlx5_ib_ucontext *context)
2080 {
2081         struct mlx5_bfreg_info *bfregi = &context->bfregi;
2082         int err;
2083         unsigned long idx;
2084         phys_addr_t pfn;
2085         pgprot_t prot;
2086         u32 bfreg_dyn_idx = 0;
2087         u32 uar_index;
2088         int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2089         int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2090                                 bfregi->num_static_sys_pages;
2091
2092         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2093                 return -EINVAL;
2094
2095         if (dyn_uar)
2096                 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2097         else
2098                 idx = get_index(vma->vm_pgoff);
2099
2100         if (idx >= max_valid_idx) {
2101                 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2102                              idx, max_valid_idx);
2103                 return -EINVAL;
2104         }
2105
2106         switch (cmd) {
2107         case MLX5_IB_MMAP_WC_PAGE:
2108         case MLX5_IB_MMAP_ALLOC_WC:
2109 /* Some architectures don't support WC memory */
2110 #if defined(CONFIG_X86)
2111                 if (!pat_enabled())
2112                         return -EPERM;
2113 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2114                 return -EPERM;
2115 #endif
2116         /* fall through */
2117         case MLX5_IB_MMAP_REGULAR_PAGE:
2118                 /* For MLX5_IB_MMAP_REGULAR_PAGE make a best effort to get WC */
2119                 prot = pgprot_writecombine(vma->vm_page_prot);
2120                 break;
2121         case MLX5_IB_MMAP_NC_PAGE:
2122                 prot = pgprot_noncached(vma->vm_page_prot);
2123                 break;
2124         default:
2125                 return -EINVAL;
2126         }
2127
2128         if (dyn_uar) {
2129                 int uars_per_page;
2130
2131                 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2132                 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2133                 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2134                         mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2135                                      bfreg_dyn_idx, bfregi->total_num_bfregs);
2136                         return -EINVAL;
2137                 }
2138
2139                 mutex_lock(&bfregi->lock);
2140                 /* Fail if the UAR is already allocated; the first bfreg
2141                  * index of each page holds its count.
2142                  */
2143                 if (bfregi->count[bfreg_dyn_idx]) {
2144                         mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2145                         mutex_unlock(&bfregi->lock);
2146                         return -EINVAL;
2147                 }
2148
2149                 bfregi->count[bfreg_dyn_idx]++;
2150                 mutex_unlock(&bfregi->lock);
2151
2152                 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2153                 if (err) {
2154                         mlx5_ib_warn(dev, "UAR alloc failed\n");
2155                         goto free_bfreg;
2156                 }
2157         } else {
2158                 uar_index = bfregi->sys_pages[idx];
2159         }
2160
2161         pfn = uar_index2pfn(dev, uar_index);
2162         mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2163
2164         err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2165                                 prot, NULL);
2166         if (err) {
2167                 mlx5_ib_err(dev,
2168                             "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2169                             err, mmap_cmd2str(cmd));
2170                 goto err;
2171         }
2172
2173         if (dyn_uar)
2174                 bfregi->sys_pages[idx] = uar_index;
2175         return 0;
2176
2177 err:
2178         if (!dyn_uar)
2179                 return err;
2180
2181         mlx5_cmd_free_uar(dev->mdev, uar_index);
2182
2183 free_bfreg:
2184         mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2185
2186         return err;
2187 }
2188
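/*
 * Device-memory (MEMIC) mmap: the extended index is a page offset into
 * the MEMIC BAR region, and every page in the requested range must have
 * been set in the ucontext's dm_pages bitmap by a prior allocation.
 */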
2189 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2190 {
2191         struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2192         struct mlx5_ib_dev *dev = to_mdev(context->device);
2193         u16 page_idx = get_extended_index(vma->vm_pgoff);
2194         size_t map_size = vma->vm_end - vma->vm_start;
2195         u32 npages = map_size >> PAGE_SHIFT;
2196         phys_addr_t pfn;
2197
2198         if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2199             page_idx + npages)
2200                 return -EINVAL;
2201
2202         pfn = ((dev->mdev->bar_addr +
2203               MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2204               PAGE_SHIFT) +
2205               page_idx;
2206         return rdma_user_mmap_io(context, vma, pfn, map_size,
2207                                  pgprot_writecombine(vma->vm_page_prot),
2208                                  NULL);
2209 }
2210
2211 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2212 {
2213         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2214         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2215         unsigned long command;
2216         phys_addr_t pfn;
2217
2218         command = get_command(vma->vm_pgoff);
2219         switch (command) {
2220         case MLX5_IB_MMAP_WC_PAGE:
2221         case MLX5_IB_MMAP_NC_PAGE:
2222         case MLX5_IB_MMAP_REGULAR_PAGE:
2223         case MLX5_IB_MMAP_ALLOC_WC:
2224                 return uar_mmap(dev, command, vma, context);
2225
2226         case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2227                 return -ENOSYS;
2228
2229         case MLX5_IB_MMAP_CORE_CLOCK:
2230                 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2231                         return -EINVAL;
2232
2233                 if (vma->vm_flags & VM_WRITE)
2234                         return -EPERM;
2235                 vma->vm_flags &= ~VM_MAYWRITE;
2236
2237                 /* Don't expose information that user space shouldn't have */
2238                 if (PAGE_SIZE > 4096)
2239                         return -EOPNOTSUPP;
2240
2241                 pfn = (dev->mdev->iseg_base +
2242                        offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2243                         PAGE_SHIFT;
2244                 return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2245                                          PAGE_SIZE,
2246                                          pgprot_noncached(vma->vm_page_prot),
2247                                          NULL);
2248         case MLX5_IB_MMAP_CLOCK_INFO:
2249                 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2250
2251         case MLX5_IB_MMAP_DEVICE_MEM:
2252                 return dm_mmap(ibcontext, vma);
2253
2254         default:
2255                 return -EINVAL;
2256         }
2257
2258         return 0;
2259 }
2260
2261 static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
2262                                         u32 type)
2263 {
2264         switch (type) {
2265         case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2266                 if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
2267                         return -EOPNOTSUPP;
2268                 break;
2269         case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2270         case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2271                 if (!capable(CAP_SYS_RAWIO) ||
2272                     !capable(CAP_NET_RAW))
2273                         return -EPERM;
2274
2275                 if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
2276                       MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
2277                         return -EOPNOTSUPP;
2278                 break;
2279         }
2280
2281         return 0;
2282 }
2283
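/*
 * MEMIC allocation rounds the length up to the device's base block
 * size, reports the resulting page index and intra-page offset back
 * through uverbs attributes, and records the pages in dm_pages so
 * dm_mmap() can validate mappings later.
 */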
2284 static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
2285                                  struct mlx5_ib_dm *dm,
2286                                  struct ib_dm_alloc_attr *attr,
2287                                  struct uverbs_attr_bundle *attrs)
2288 {
2289         struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
2290         u64 start_offset;
2291         u32 page_idx;
2292         int err;
2293
2294         dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2295
2296         err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
2297                                    dm->size, attr->alignment);
2298         if (err)
2299                 return err;
2300
2301         page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
2302                     MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
2303                     PAGE_SHIFT;
2304
2305         err = uverbs_copy_to(attrs,
2306                              MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2307                              &page_idx, sizeof(page_idx));
2308         if (err)
2309                 goto err_dealloc;
2310
2311         start_offset = dm->dev_addr & ~PAGE_MASK;
2312         err = uverbs_copy_to(attrs,
2313                              MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2314                              &start_offset, sizeof(start_offset));
2315         if (err)
2316                 goto err_dealloc;
2317
2318         bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
2319                    DIV_ROUND_UP(dm->size, PAGE_SIZE));
2320
2321         return 0;
2322
2323 err_dealloc:
2324         mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2325
2326         return err;
2327 }
2328
2329 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
2330                                   struct mlx5_ib_dm *dm,
2331                                   struct ib_dm_alloc_attr *attr,
2332                                   struct uverbs_attr_bundle *attrs,
2333                                   int type)
2334 {
2335         struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
2336         u64 act_size;
2337         int err;
2338
2339         /* The allocation size must be a multiple of the basic block size
2340          * and a power of 2.
2341          */
2342         act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
2343         act_size = roundup_pow_of_two(act_size);
2344
2345         dm->size = act_size;
2346         err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
2347                                    to_mucontext(ctx)->devx_uid, &dm->dev_addr,
2348                                    &dm->icm_dm.obj_id);
2349         if (err)
2350                 return err;
2351
2352         err = uverbs_copy_to(attrs,
2353                              MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2354                              &dm->dev_addr, sizeof(dm->dev_addr));
2355         if (err)
2356                 mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
2357                                        to_mucontext(ctx)->devx_uid, dm->dev_addr,
2358                                        dm->icm_dm.obj_id);
2359
2360         return err;
2361 }
2362
2363 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2364                                struct ib_ucontext *context,
2365                                struct ib_dm_alloc_attr *attr,
2366                                struct uverbs_attr_bundle *attrs)
2367 {
2368         struct mlx5_ib_dm *dm;
2369         enum mlx5_ib_uapi_dm_type type;
2370         int err;
2371
2372         err = uverbs_get_const_default(&type, attrs,
2373                                        MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
2374                                        MLX5_IB_UAPI_DM_TYPE_MEMIC);
2375         if (err)
2376                 return ERR_PTR(err);
2377
2378         mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
2379                     type, attr->length, attr->alignment);
2380
2381         err = check_dm_type_support(to_mdev(ibdev), type);
2382         if (err)
2383                 return ERR_PTR(err);
2384
2385         dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2386         if (!dm)
2387                 return ERR_PTR(-ENOMEM);
2388
2389         dm->type = type;
2390
2391         switch (type) {
2392         case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2393                 err = handle_alloc_dm_memic(context, dm,
2394                                             attr,
2395                                             attrs);
2396                 break;
2397         case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2398                 err = handle_alloc_dm_sw_icm(context, dm,
2399                                              attr, attrs,
2400                                              MLX5_SW_ICM_TYPE_STEERING);
2401                 break;
2402         case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2403                 err = handle_alloc_dm_sw_icm(context, dm,
2404                                              attr, attrs,
2405                                              MLX5_SW_ICM_TYPE_HEADER_MODIFY);
2406                 break;
2407         default:
2408                 err = -EOPNOTSUPP;
2409         }
2410
2411         if (err)
2412                 goto err_free;
2413
2414         return &dm->ibdm;
2415
2416 err_free:
2417         kfree(dm);
2418         return ERR_PTR(err);
2419 }
2420
2421 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
2422 {
2423         struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
2424                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2425         struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
2426         struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
2427         struct mlx5_ib_dm *dm = to_mdm(ibdm);
2428         u32 page_idx;
2429         int ret;
2430
2431         switch (dm->type) {
2432         case MLX5_IB_UAPI_DM_TYPE_MEMIC:
2433                 ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
2434                 if (ret)
2435                         return ret;
2436
2437                 page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
2438                             MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
2439                             PAGE_SHIFT;
2440                 bitmap_clear(ctx->dm_pages, page_idx,
2441                              DIV_ROUND_UP(dm->size, PAGE_SIZE));
2442                 break;
2443         case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
2444                 ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
2445                                              dm->size, ctx->devx_uid, dm->dev_addr,
2446                                              dm->icm_dm.obj_id);
2447                 if (ret)
2448                         return ret;
2449                 break;
2450         case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
2451                 ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
2452                                              dm->size, ctx->devx_uid, dm->dev_addr,
2453                                              dm->icm_dm.obj_id);
2454                 if (ret)
2455                         return ret;
2456                 break;
2457         default:
2458                 return -EOPNOTSUPP;
2459         }
2460
2461         kfree(dm);
2462
2463         return 0;
2464 }
2465
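/*
 * PD allocation goes straight to the firmware command interface, with
 * the ucontext's DEVX uid (if any) stamped into the command. For user
 * PDs the pdn is copied back to userspace, and the PD is torn down
 * again if that copy fails.
 */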
2466 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2467 {
2468         struct mlx5_ib_pd *pd = to_mpd(ibpd);
2469         struct ib_device *ibdev = ibpd->device;
2470         struct mlx5_ib_alloc_pd_resp resp;
2471         int err;
2472         u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2473         u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
2474         u16 uid = 0;
2475         struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2476                 udata, struct mlx5_ib_ucontext, ibucontext);
2477
2478         uid = context ? context->devx_uid : 0;
2479         MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2480         MLX5_SET(alloc_pd_in, in, uid, uid);
2481         err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2482                             out, sizeof(out));
2483         if (err)
2484                 return err;
2485
2486         pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2487         pd->uid = uid;
2488         if (udata) {
2489                 resp.pdn = pd->pdn;
2490                 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2491                         mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2492                         return -EFAULT;
2493                 }
2494         }
2495
2496         return 0;
2497 }
2498
2499 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2500 {
2501         struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2502         struct mlx5_ib_pd *mpd = to_mpd(pd);
2503
2504         mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2505 }
2506
2507 enum {
2508         MATCH_CRITERIA_ENABLE_OUTER_BIT,
2509         MATCH_CRITERIA_ENABLE_MISC_BIT,
2510         MATCH_CRITERIA_ENABLE_INNER_BIT,
2511         MATCH_CRITERIA_ENABLE_MISC2_BIT
2512 };
2513
2514 #define HEADER_IS_ZERO(match_criteria, headers)                            \
2515         !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2516                     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
2517
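/*
 * A match-criteria group is enabled only if its header block carries a
 * non-zero mask; HEADER_IS_ZERO uses memchr_inv() to scan the whole
 * block in one pass.
 */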
2518 static u8 get_match_criteria_enable(u32 *match_criteria)
2519 {
2520         u8 match_criteria_enable;
2521
2522         match_criteria_enable =
2523                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2524                 MATCH_CRITERIA_ENABLE_OUTER_BIT;
2525         match_criteria_enable |=
2526                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2527                 MATCH_CRITERIA_ENABLE_MISC_BIT;
2528         match_criteria_enable |=
2529                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2530                 MATCH_CRITERIA_ENABLE_INNER_BIT;
2531         match_criteria_enable |=
2532                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2533                 MATCH_CRITERIA_ENABLE_MISC2_BIT;
2534
2535         return match_criteria_enable;
2536 }
2537
2538 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2539 {
2540         u8 entry_mask;
2541         u8 entry_val;
2542         int err = 0;
2543
2544         if (!mask)
2545                 goto out;
2546
2547         entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
2548                               ip_protocol);
2549         entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
2550                              ip_protocol);
2551         if (!entry_mask) {
2552                 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2553                 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2554                 goto out;
2555         }
2556         /* Don't override existing ip protocol */
2557         if (mask != entry_mask || val != entry_val)
2558                 err = -EINVAL;
2559 out:
2560         return err;
2561 }
2562
2563 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2564                            bool inner)
2565 {
2566         if (inner) {
2567                 MLX5_SET(fte_match_set_misc,
2568                          misc_c, inner_ipv6_flow_label, mask);
2569                 MLX5_SET(fte_match_set_misc,
2570                          misc_v, inner_ipv6_flow_label, val);
2571         } else {
2572                 MLX5_SET(fte_match_set_misc,
2573                          misc_c, outer_ipv6_flow_label, mask);
2574                 MLX5_SET(fte_match_set_misc,
2575                          misc_v, outer_ipv6_flow_label, val);
2576         }
2577 }
2578
2579 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2580 {
2581         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2582         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2583         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2584         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2585 }
2586
2587 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2588 {
2589         if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2590             !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2591                 return -EOPNOTSUPP;
2592
2593         if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2594             !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2595                 return -EOPNOTSUPP;
2596
2597         if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2598             !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2599                 return -EOPNOTSUPP;
2600
2601         if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2602             !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2603                 return -EOPNOTSUPP;
2604
2605         return 0;
2606 }
2607
2608 #define LAST_ETH_FIELD vlan_tag
2609 #define LAST_IB_FIELD sl
2610 #define LAST_IPV4_FIELD tos
2611 #define LAST_IPV6_FIELD traffic_class
2612 #define LAST_TCP_UDP_FIELD src_port
2613 #define LAST_TUNNEL_FIELD tunnel_id
2614 #define LAST_FLOW_TAG_FIELD tag_id
2615 #define LAST_DROP_FIELD size
2616 #define LAST_COUNTERS_FIELD counters
2617
2618 /* Non-zero if any field past the last supported one is set in the filter */
2619 #define FIELDS_NOT_SUPPORTED(filter, field)\
2620         memchr_inv((void *)&filter.field  +\
2621                    sizeof(filter.field), 0,\
2622                    sizeof(filter) -\
2623                    offsetof(typeof(filter), field) -\
2624                    sizeof(filter.field))
2625
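/*
 * Translate an mlx5_ib_flow_action into mlx5_flow_act flags. Each
 * action may appear at most once per flow: a second ESP, modify-header,
 * decap or packet-reformat request on the same flow is rejected.
 */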
2626 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2627                            bool is_egress,
2628                            struct mlx5_flow_act *action)
2629 {
2631         switch (maction->ib_action.type) {
2632         case IB_FLOW_ACTION_ESP:
2633                 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2634                                       MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2635                         return -EINVAL;
2636                 /* Currently only AES_GCM keymat is supported by the driver */
2637                 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2638                 action->action |= is_egress ?
2639                         MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2640                         MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2641                 return 0;
2642         case IB_FLOW_ACTION_UNSPECIFIED:
2643                 if (maction->flow_action_raw.sub_type ==
2644                     MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2645                         if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2646                                 return -EINVAL;
2647                         action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2648                         action->modify_hdr =
2649                                 maction->flow_action_raw.modify_hdr;
2650                         return 0;
2651                 }
2652                 if (maction->flow_action_raw.sub_type ==
2653                     MLX5_IB_FLOW_ACTION_DECAP) {
2654                         if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2655                                 return -EINVAL;
2656                         action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2657                         return 0;
2658                 }
2659                 if (maction->flow_action_raw.sub_type ==
2660                     MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2661                         if (action->action &
2662                             MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2663                                 return -EINVAL;
2664                         action->action |=
2665                                 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2666                         action->pkt_reformat =
2667                                 maction->flow_action_raw.pkt_reformat;
2668                         return 0;
2669                 }
2670                 /* fall through */
2671         default:
2672                 return -EOPNOTSUPP;
2673         }
2674 }
2675
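/*
 * Convert one ib_flow_spec into the firmware match layout. Inner vs
 * outer headers are selected by IB_FLOW_SPEC_INNER, and match_ipv
 * records whether the flow table can match on ip_version directly;
 * if not, the ethertype is matched instead (see the IPv4 case below).
 */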
2676 static int parse_flow_attr(struct mlx5_core_dev *mdev,
2677                            struct mlx5_flow_spec *spec,
2678                            const union ib_flow_spec *ib_spec,
2679                            const struct ib_flow_attr *flow_attr,
2680                            struct mlx5_flow_act *action, u32 prev_type)
2681 {
2682         struct mlx5_flow_context *flow_context = &spec->flow_context;
2683         u32 *match_c = spec->match_criteria;
2684         u32 *match_v = spec->match_value;
2685         void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2686                                            misc_parameters);
2687         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2688                                            misc_parameters);
2689         void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2690                                             misc_parameters_2);
2691         void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2692                                             misc_parameters_2);
2693         void *headers_c;
2694         void *headers_v;
2695         int match_ipv;
2696         int ret;
2697
2698         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2699                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2700                                          inner_headers);
2701                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2702                                          inner_headers);
2703                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2704                                         ft_field_support.inner_ip_version);
2705         } else {
2706                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2707                                          outer_headers);
2708                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2709                                          outer_headers);
2710                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2711                                         ft_field_support.outer_ip_version);
2712         }
2713
2714         switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2715         case IB_FLOW_SPEC_ETH:
2716                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2717                         return -EOPNOTSUPP;
2718
2719                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2720                                              dmac_47_16),
2721                                 ib_spec->eth.mask.dst_mac);
2722                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2723                                              dmac_47_16),
2724                                 ib_spec->eth.val.dst_mac);
2725
2726                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2727                                              smac_47_16),
2728                                 ib_spec->eth.mask.src_mac);
2729                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2730                                              smac_47_16),
2731                                 ib_spec->eth.val.src_mac);
2732
2733                 if (ib_spec->eth.mask.vlan_tag) {
2734                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2735                                  cvlan_tag, 1);
2736                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2737                                  cvlan_tag, 1);
2738
2739                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2740                                  first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2741                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2742                                  first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2743
2744                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2745                                  first_cfi,
2746                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2747                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2748                                  first_cfi,
2749                                  ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2750
2751                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2752                                  first_prio,
2753                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2754                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2755                                  first_prio,
2756                                  ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2757                 }
2758                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2759                          ethertype, ntohs(ib_spec->eth.mask.ether_type));
2760                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2761                          ethertype, ntohs(ib_spec->eth.val.ether_type));
2762                 break;
2763         case IB_FLOW_SPEC_IPV4:
2764                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2765                         return -EOPNOTSUPP;
2766
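                     /* Match on the ip_version field when the device supports
                      * it; otherwise approximate by matching the ethertype.
                      */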
2767                 if (match_ipv) {
2768                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2769                                  ip_version, 0xf);
2770                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2771                                  ip_version, MLX5_FS_IPV4_VERSION);
2772                 } else {
2773                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2774                                  ethertype, 0xffff);
2775                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2776                                  ethertype, ETH_P_IP);
2777                 }
2778
2779                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2780                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2781                        &ib_spec->ipv4.mask.src_ip,
2782                        sizeof(ib_spec->ipv4.mask.src_ip));
2783                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2784                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2785                        &ib_spec->ipv4.val.src_ip,
2786                        sizeof(ib_spec->ipv4.val.src_ip));
2787                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2788                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2789                        &ib_spec->ipv4.mask.dst_ip,
2790                        sizeof(ib_spec->ipv4.mask.dst_ip));
2791                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2792                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2793                        &ib_spec->ipv4.val.dst_ip,
2794                        sizeof(ib_spec->ipv4.val.dst_ip));
2795
2796                 set_tos(headers_c, headers_v,
2797                         ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2798
2799                 if (set_proto(headers_c, headers_v,
2800                               ib_spec->ipv4.mask.proto,
2801                               ib_spec->ipv4.val.proto))
2802                         return -EINVAL;
2803                 break;
2804         case IB_FLOW_SPEC_IPV6:
2805                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2806                         return -EOPNOTSUPP;
2807
2808                 if (match_ipv) {
2809                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2810                                  ip_version, 0xf);
2811                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2812                                  ip_version, MLX5_FS_IPV6_VERSION);
2813                 } else {
2814                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2815                                  ethertype, 0xffff);
2816                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2817                                  ethertype, ETH_P_IPV6);
2818                 }
2819
2820                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2821                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2822                        &ib_spec->ipv6.mask.src_ip,
2823                        sizeof(ib_spec->ipv6.mask.src_ip));
2824                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2825                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2826                        &ib_spec->ipv6.val.src_ip,
2827                        sizeof(ib_spec->ipv6.val.src_ip));
2828                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2829                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2830                        &ib_spec->ipv6.mask.dst_ip,
2831                        sizeof(ib_spec->ipv6.mask.dst_ip));
2832                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2833                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2834                        &ib_spec->ipv6.val.dst_ip,
2835                        sizeof(ib_spec->ipv6.val.dst_ip));
2836
2837                 set_tos(headers_c, headers_v,
2838                         ib_spec->ipv6.mask.traffic_class,
2839                         ib_spec->ipv6.val.traffic_class);
2840
2841                 if (set_proto(headers_c, headers_v,
2842                               ib_spec->ipv6.mask.next_hdr,
2843                               ib_spec->ipv6.val.next_hdr))
2844                         return -EINVAL;
2845
2846                 set_flow_label(misc_params_c, misc_params_v,
2847                                ntohl(ib_spec->ipv6.mask.flow_label),
2848                                ntohl(ib_spec->ipv6.val.flow_label),
2849                                ib_spec->type & IB_FLOW_SPEC_INNER);
2850                 break;
2851         case IB_FLOW_SPEC_ESP:
2852                 if (ib_spec->esp.mask.seq)
2853                         return -EOPNOTSUPP;
2854
2855                 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2856                          ntohl(ib_spec->esp.mask.spi));
2857                 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2858                          ntohl(ib_spec->esp.val.spi));
2859                 break;
2860         case IB_FLOW_SPEC_TCP:
2861                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2862                                          LAST_TCP_UDP_FIELD))
2863                         return -EOPNOTSUPP;
2864
2865                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
2866                         return -EINVAL;
2867
2868                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2869                          ntohs(ib_spec->tcp_udp.mask.src_port));
2870                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2871                          ntohs(ib_spec->tcp_udp.val.src_port));
2872
2873                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2874                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2875                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2876                          ntohs(ib_spec->tcp_udp.val.dst_port));
2877                 break;
2878         case IB_FLOW_SPEC_UDP:
2879                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2880                                          LAST_TCP_UDP_FIELD))
2881                         return -EOPNOTSUPP;
2882
2883                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
2884                         return -EINVAL;
2885
2886                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2887                          ntohs(ib_spec->tcp_udp.mask.src_port));
2888                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2889                          ntohs(ib_spec->tcp_udp.val.src_port));
2890
2891                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2892                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2893                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2894                          ntohs(ib_spec->tcp_udp.val.dst_port));
2895                 break;
2896         case IB_FLOW_SPEC_GRE:
2897                 if (ib_spec->gre.mask.c_ks_res0_ver)
2898                         return -EOPNOTSUPP;
2899
2900                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
2901                         return -EINVAL;
2902
2903                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2904                          0xff);
2905                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2906                          IPPROTO_GRE);
2907
2908                 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2909                          ntohs(ib_spec->gre.mask.protocol));
2910                 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2911                          ntohs(ib_spec->gre.val.protocol));
2912
2913                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2914                                     gre_key.nvgre.hi),
2915                        &ib_spec->gre.mask.key,
2916                        sizeof(ib_spec->gre.mask.key));
2917                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2918                                     gre_key.nvgre.hi),
2919                        &ib_spec->gre.val.key,
2920                        sizeof(ib_spec->gre.val.key));
2921                 break;
2922         case IB_FLOW_SPEC_MPLS:
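                     /* Where the MPLS label stack sits depends on the previous
                      * spec: MPLS over UDP, MPLS over GRE, or a plain
                      * inner/outer label stack.
                      */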
2923                 switch (prev_type) {
2924                 case IB_FLOW_SPEC_UDP:
2925                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2926                                                    ft_field_support.outer_first_mpls_over_udp),
2927                                                    &ib_spec->mpls.mask.tag))
2928                                 return -EOPNOTSUPP;
2929
2930                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2931                                             outer_first_mpls_over_udp),
2932                                &ib_spec->mpls.val.tag,
2933                                sizeof(ib_spec->mpls.val.tag));
2934                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2935                                             outer_first_mpls_over_udp),
2936                                &ib_spec->mpls.mask.tag,
2937                                sizeof(ib_spec->mpls.mask.tag));
2938                         break;
2939                 case IB_FLOW_SPEC_GRE:
2940                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2941                                                    ft_field_support.outer_first_mpls_over_gre),
2942                                                    &ib_spec->mpls.mask.tag))
2943                                 return -EOPNOTSUPP;
2944
2945                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2946                                             outer_first_mpls_over_gre),
2947                                &ib_spec->mpls.val.tag,
2948                                sizeof(ib_spec->mpls.val.tag));
2949                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2950                                             outer_first_mpls_over_gre),
2951                                &ib_spec->mpls.mask.tag,
2952                                sizeof(ib_spec->mpls.mask.tag));
2953                         break;
2954                 default:
2955                         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2956                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2957                                                            ft_field_support.inner_first_mpls),
2958                                                            &ib_spec->mpls.mask.tag))
2959                                         return -EOPNOTSUPP;
2960
2961                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2962                                                     inner_first_mpls),
2963                                        &ib_spec->mpls.val.tag,
2964                                        sizeof(ib_spec->mpls.val.tag));
2965                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2966                                                     inner_first_mpls),
2967                                        &ib_spec->mpls.mask.tag,
2968                                        sizeof(ib_spec->mpls.mask.tag));
2969                         } else {
2970                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2971                                                            ft_field_support.outer_first_mpls),
2972                                                            &ib_spec->mpls.mask.tag))
2973                                         return -EOPNOTSUPP;
2974
2975                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2976                                                     outer_first_mpls),
2977                                        &ib_spec->mpls.val.tag,
2978                                        sizeof(ib_spec->mpls.val.tag));
2979                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2980                                                     outer_first_mpls),
2981                                        &ib_spec->mpls.mask.tag,
2982                                        sizeof(ib_spec->mpls.mask.tag));
2983                         }
2984                 }
2985                 break;
2986         case IB_FLOW_SPEC_VXLAN_TUNNEL:
2987                 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2988                                          LAST_TUNNEL_FIELD))
2989                         return -EOPNOTSUPP;
2990
2991                 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2992                          ntohl(ib_spec->tunnel.mask.tunnel_id));
2993                 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2994                          ntohl(ib_spec->tunnel.val.tunnel_id));
2995                 break;
2996         case IB_FLOW_SPEC_ACTION_TAG:
2997                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2998                                          LAST_FLOW_TAG_FIELD))
2999                         return -EOPNOTSUPP;
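                     /* the hardware flow tag is a 24-bit field */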
3000                 if (ib_spec->flow_tag.tag_id >= BIT(24))
3001                         return -EINVAL;
3002
3003                 flow_context->flow_tag = ib_spec->flow_tag.tag_id;
3004                 flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
3005                 break;
3006         case IB_FLOW_SPEC_ACTION_DROP:
3007                 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
3008                                          LAST_DROP_FIELD))
3009                         return -EOPNOTSUPP;
3010                 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3011                 break;
3012         case IB_FLOW_SPEC_ACTION_HANDLE:
3013                 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
3014                         flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
3015                 if (ret)
3016                         return ret;
3017                 break;
3018         case IB_FLOW_SPEC_ACTION_COUNT:
3019                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
3020                                          LAST_COUNTERS_FIELD))
3021                         return -EOPNOTSUPP;
3022
3023                 /* for now, only one counters spec per flow is supported */
3024                 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3025                         return -EINVAL;
3026
3027                 action->counters = ib_spec->flow_count.counters;
3028                 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3029                 break;
3030         default:
3031                 return -EINVAL;
3032         }
3033
3034         return 0;
3035 }
3036
3037 /* A flow that could match both multicast and unicast packets must not be
3038  * placed in the multicast flow steering table, where it could steal
3039  * packets belonging to other multicast rules.
3040  */
3041 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
3042 {
3043         union ib_flow_spec *flow_spec;
3044
3045         if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
3046             ib_attr->num_of_specs < 1)
3047                 return false;
3048
3049         flow_spec = (union ib_flow_spec *)(ib_attr + 1);
3050         if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
3051                 struct ib_flow_spec_ipv4 *ipv4_spec;
3052
3053                 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
3054                 if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
3055                         return true;
3056
3057                 return false;
3058         }
3059
3060         if (flow_spec->type == IB_FLOW_SPEC_ETH) {
3061                 struct ib_flow_spec_eth *eth_spec;
3062
3063                 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
3064                 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
3065                        is_multicast_ether_addr(eth_spec->val.dst_mac);
3066         }
3067
3068         return false;
3069 }
3070
3071 enum valid_spec {
3072         VALID_SPEC_INVALID,
3073         VALID_SPEC_VALID,
3074         VALID_SPEC_NA,
3075 };
3076
3077 static enum valid_spec
3078 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
3079                      const struct mlx5_flow_spec *spec,
3080                      const struct mlx5_flow_act *flow_act,
3081                      bool egress)
3082 {
3083         const u32 *match_c = spec->match_criteria;
3084         bool is_crypto =
3085                 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
3086                                      MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
3087         bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
3088         bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
3089
3090         /*
3091          * Currently only crypto is supported in egress; once regular egress
3092          * rules are supported, keep returning VALID_SPEC_NA for them.
3093          */
3094         if (!is_crypto)
3095                 return VALID_SPEC_NA;
3096
3097         return is_ipsec &&
3098                 (!egress || (!is_drop &&
3099                              !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
3100                 VALID_SPEC_VALID : VALID_SPEC_INVALID;
3101 }
3102
3103 static bool is_valid_spec(struct mlx5_core_dev *mdev,
3104                           const struct mlx5_flow_spec *spec,
3105                           const struct mlx5_flow_act *flow_act,
3106                           bool egress)
3107 {
3108         /* We currently only support IPsec egress flows */
3109         return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
3110 }
3111
3112 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
3113                                const struct ib_flow_attr *flow_attr,
3114                                bool check_inner)
3115 {
3116         union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
3117         int match_ipv = check_inner ?
3118                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3119                                         ft_field_support.inner_ip_version) :
3120                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
3121                                         ft_field_support.outer_ip_version);
3122         int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
3123         bool ipv4_spec_valid, ipv6_spec_valid;
3124         unsigned int ip_spec_type = 0;
3125         bool has_ethertype = false;
3126         unsigned int spec_index;
3127         bool mask_valid = true;
3128         u16 eth_type = 0;
3129         bool type_valid;
3130
3131         /* Validate that any ETH spec's ethertype agrees with the L3 specs */
3132         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3133                 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
3134                     ib_spec->eth.mask.ether_type) {
3135                         mask_valid = (ib_spec->eth.mask.ether_type ==
3136                                       htons(0xffff));
3137                         has_ethertype = true;
3138                         eth_type = ntohs(ib_spec->eth.val.ether_type);
3139                 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
3140                            (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
3141                         ip_spec_type = ib_spec->type;
3142                 }
3143                 ib_spec = (void *)ib_spec + ib_spec->size;
3144         }
3145
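             /* The attribute is consistent when no ethertype was given, no L3
              * spec was given, or the ethertype agrees with the L3 spec's
              * address family; MPLS ethertypes are also accepted when the
              * device can match on ip_version directly.
              */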
3146         type_valid = (!has_ethertype) || (!ip_spec_type);
3147         if (!type_valid && mask_valid) {
3148                 ipv4_spec_valid = (eth_type == ETH_P_IP) &&
3149                         (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
3150                 ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
3151                         (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
3152
3153                 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
3154                              (((eth_type == ETH_P_MPLS_UC) ||
3155                                (eth_type == ETH_P_MPLS_MC)) && match_ipv);
3156         }
3157
3158         return type_valid;
3159 }
3160
3161 static bool is_valid_attr(struct mlx5_core_dev *mdev,
3162                           const struct ib_flow_attr *flow_attr)
3163 {
3164         return is_valid_ethertype(mdev, flow_attr, false) &&
3165                is_valid_ethertype(mdev, flow_attr, true);
3166 }
3167
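     /*
      * Drop one reference on the priority's flow table and destroy the table
      * once it has no users; callers in this file hold dev->flow_db->lock
      * around this.
      */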
3168 static void put_flow_table(struct mlx5_ib_dev *dev,
3169                            struct mlx5_ib_flow_prio *prio, bool ft_added)
3170 {
3171         prio->refcount -= !!ft_added;
3172         if (!prio->refcount) {
3173                 mlx5_destroy_flow_table(prio->flow_table);
3174                 prio->flow_table = NULL;
3175         }
3176 }
3177
3178 static void counters_clear_description(struct ib_counters *counters)
3179 {
3180         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3181
3182         mutex_lock(&mcounters->mcntrs_mutex);
3183         kfree(mcounters->counters_data);
3184         mcounters->counters_data = NULL;
3185         mcounters->cntrs_max_index = 0;
3186         mutex_unlock(&mcounters->mcntrs_mutex);
3187 }
3188
3189 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
3190 {
3191         struct mlx5_ib_flow_handler *handler = container_of(flow_id,
3192                                                           struct mlx5_ib_flow_handler,
3193                                                           ibflow);
3194         struct mlx5_ib_flow_handler *iter, *tmp;
3195         struct mlx5_ib_dev *dev = handler->dev;
3196
3197         mutex_lock(&dev->flow_db->lock);
3198
3199         list_for_each_entry_safe(iter, tmp, &handler->list, list) {
3200                 mlx5_del_flow_rules(iter->rule);
3201                 put_flow_table(dev, iter->prio, true);
3202                 list_del(&iter->list);
3203                 kfree(iter);
3204         }
3205
3206         mlx5_del_flow_rules(handler->rule);
3207         put_flow_table(dev, handler->prio, true);
3208         if (handler->ibcounters &&
3209             atomic_read(&handler->ibcounters->usecnt) == 1)
3210                 counters_clear_description(handler->ibcounters);
3211
3212         mutex_unlock(&dev->flow_db->lock);
3213         if (handler->flow_matcher)
3214                 atomic_dec(&handler->flow_matcher->usecnt);
3215         kfree(handler);
3216
3217         return 0;
3218 }
3219
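     /*
      * Each IB user priority maps onto a pair of core priorities: don't-trap
      * rules take the even slot and normal rules the odd one, so a don't-trap
      * rule is evaluated first at the same user priority (assuming lower core
      * priority values take precedence).
      */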
3220 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3221 {
3222         priority *= 2;
3223         if (!dont_trap)
3224                 priority++;
3225         return priority;
3226 }
3227
3228 enum flow_table_type {
3229         MLX5_IB_FT_RX,
3230         MLX5_IB_FT_TX
3231 };
3232
3233 #define MLX5_FS_MAX_TYPES        6
3234 #define MLX5_FS_MAX_ENTRIES      BIT(16)
3235
3236 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3237                                            struct mlx5_ib_flow_prio *prio,
3238                                            int priority,
3239                                            int num_entries, int num_groups,
3240                                            u32 flags)
3241 {
3242         struct mlx5_flow_table *ft;
3243
3244         ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3245                                                  num_entries,
3246                                                  num_groups,
3247                                                  0, flags);
3248         if (IS_ERR(ft))
3249                 return ERR_CAST(ft);
3250
3251         prio->flow_table = ft;
3252         prio->refcount = 0;
3253         return prio;
3254 }
3255
3256 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3257                                                 struct ib_flow_attr *flow_attr,
3258                                                 enum flow_table_type ft_type)
3259 {
3260         bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3261         struct mlx5_flow_namespace *ns = NULL;
3262         struct mlx5_ib_flow_prio *prio;
3263         struct mlx5_flow_table *ft;
3264         int max_table_size;
3265         int num_entries;
3266         int num_groups;
3267         bool esw_encap;
3268         u32 flags = 0;
3269         int priority;
3270
3271         max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3272                                                        log_max_ft_size));
3273         esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3274                 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
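             /* Tunnel decap/reformat is requested only for non-representor
              * devices, when the eswitch is not offloading encap itself and
              * the device advertises the capability.
              */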
3275         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3276                 enum mlx5_flow_namespace_type fn_type;
3277
3278                 if (flow_is_multicast_only(flow_attr) &&
3279                     !dont_trap)
3280                         priority = MLX5_IB_FLOW_MCAST_PRIO;
3281                 else
3282                         priority = ib_prio_to_core_prio(flow_attr->priority,
3283                                                         dont_trap);
3284                 if (ft_type == MLX5_IB_FT_RX) {
3285                         fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3286                         prio = &dev->flow_db->prios[priority];
3287                         if (!dev->is_rep && !esw_encap &&
3288                             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3289                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3290                         if (!dev->is_rep && !esw_encap &&
3291                             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3292                                         reformat_l3_tunnel_to_l2))
3293                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3294                 } else {
3295                         max_table_size =
3296                                 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3297                                                               log_max_ft_size));
3298                         fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3299                         prio = &dev->flow_db->egress_prios[priority];
3300                         if (!dev->is_rep && !esw_encap &&
3301                             MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3302                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3303                 }
3304                 ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3305                 num_entries = MLX5_FS_MAX_ENTRIES;
3306                 num_groups = MLX5_FS_MAX_TYPES;
3307         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3308                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3309                 ns = mlx5_get_flow_namespace(dev->mdev,
3310                                              MLX5_FLOW_NAMESPACE_LEFTOVERS);
3311                 build_leftovers_ft_param(&priority,
3312                                          &num_entries,
3313                                          &num_groups);
3314                 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3315         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3316                 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3317                                         allow_sniffer_and_nic_rx_shared_tir))
3318                         return ERR_PTR(-ENOTSUPP);
3319
3320                 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3321                                              MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3322                                              MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3323
3324                 prio = &dev->flow_db->sniffer[ft_type];
3325                 priority = 0;
3326                 num_entries = 1;
3327                 num_groups = 1;
3328         }
3329
3330         if (!ns)
3331                 return ERR_PTR(-ENOTSUPP);
3332
3333         max_table_size = min_t(int, num_entries, max_table_size);
3334
3335         ft = prio->flow_table;
3336         if (!ft)
3337                 return _get_prio(ns, prio, priority, max_table_size, num_groups,
3338                                  flags);
3339
3340         return prio;
3341 }
3342
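     /*
      * For flows on top of an IPoIB underlay QP, additionally match the BTH
      * destination QP so the rule only catches the underlay's traffic,
      * provided the device can match on bth_dst_qp.
      */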
3343 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3344                             struct mlx5_flow_spec *spec,
3345                             u32 underlay_qpn)
3346 {
3347         void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3348                                            spec->match_criteria,
3349                                            misc_parameters);
3350         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3351                                            misc_parameters);
3352
3353         if (underlay_qpn &&
3354             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3355                                       ft_field_support.bth_dst_qp)) {
3356                 MLX5_SET(fte_match_set_misc,
3357                          misc_params_v, bth_dst_qp, underlay_qpn);
3358                 MLX5_SET(fte_match_set_misc,
3359                          misc_params_c, bth_dst_qp, 0xffffff);
3360         }
3361 }
3362
3363 static int read_flow_counters(struct ib_device *ibdev,
3364                               struct mlx5_read_counters_attr *read_attr)
3365 {
3366         struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3367         struct mlx5_ib_dev *dev = to_mdev(ibdev);
3368
3369         return mlx5_fc_query(dev->mdev, fc,
3370                              &read_attr->out[IB_COUNTER_PACKETS],
3371                              &read_attr->out[IB_COUNTER_BYTES]);
3372 }
3373
3374 /* flow counters currently expose two counters: packets and bytes */
3375 #define FLOW_COUNTERS_NUM 2
3376 static int counters_set_description(struct ib_counters *counters,
3377                                     enum mlx5_ib_counters_type counters_type,
3378                                     struct mlx5_ib_flow_counters_desc *desc_data,
3379                                     u32 ncounters)
3380 {
3381         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3382         u32 cntrs_max_index = 0;
3383         int i;
3384
3385         if (counters_type != MLX5_IB_COUNTERS_FLOW)
3386                 return -EINVAL;
3387
3388         /* init the fields for the object */
3389         mcounters->type = counters_type;
3390         mcounters->read_counters = read_flow_counters;
3391         mcounters->counters_num = FLOW_COUNTERS_NUM;
3392         mcounters->ncounters = ncounters;
3393         /* each counter entry has a description and index pair */
3394         for (i = 0; i < ncounters; i++) {
3395                 if (desc_data[i].description > IB_COUNTER_BYTES)
3396                         return -EINVAL;
3397
3398                 if (cntrs_max_index <= desc_data[i].index)
3399                         cntrs_max_index = desc_data[i].index + 1;
3400         }
3401
3402         mutex_lock(&mcounters->mcntrs_mutex);
3403         mcounters->counters_data = desc_data;
3404         mcounters->cntrs_max_index = cntrs_max_index;
3405         mutex_unlock(&mcounters->mcntrs_mutex);
3406
3407         return 0;
3408 }
3409
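     /* Cap the number of user counter descriptions; presumably this keeps the
      * array copied from user space within a USHRT_MAX-byte bound, each
      * description being a pair of u32s (description and index).
      */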
3410 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3411 static int flow_counters_set_data(struct ib_counters *ibcounters,
3412                                   struct mlx5_ib_create_flow *ucmd)
3413 {
3414         struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3415         struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3416         struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3417         bool hw_hndl = false;
3418         int ret = 0;
3419
3420         if (ucmd && ucmd->ncounters_data != 0) {
3421                 cntrs_data = ucmd->data;
3422                 if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3423                         return -EINVAL;
3424
3425                 desc_data = kcalloc(cntrs_data->ncounters,
3426                                     sizeof(*desc_data),
3427                                     GFP_KERNEL);
3428                 if (!desc_data)
3429                         return -ENOMEM;
3430
3431                 if (copy_from_user(desc_data,
3432                                    u64_to_user_ptr(cntrs_data->counters_data),
3433                                    sizeof(*desc_data) * cntrs_data->ncounters)) {
3434                         ret = -EFAULT;
3435                         goto free;
3436                 }
3437         }
3438
3439         if (!mcounters->hw_cntrs_hndl) {
3440                 mcounters->hw_cntrs_hndl = mlx5_fc_create(
3441                         to_mdev(ibcounters->device)->mdev, false);
3442                 if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3443                         ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3444                         goto free;
3445                 }
3446                 hw_hndl = true;
3447         }
3448
3449         if (desc_data) {
3450                 /* counters already bound to at least one flow */
3451                 if (mcounters->cntrs_max_index) {
3452                         ret = -EINVAL;
3453                         goto free_hndl;
3454                 }
3455
3456                 ret = counters_set_description(ibcounters,
3457                                                MLX5_IB_COUNTERS_FLOW,
3458                                                desc_data,
3459                                                cntrs_data->ncounters);
3460                 if (ret)
3461                         goto free_hndl;
3462
3463         } else if (!mcounters->cntrs_max_index) {
3464                 /* counters are not bound yet; udata must be provided */
3465                 ret = -EINVAL;
3466                 goto free_hndl;
3467         }
3468
3469         return 0;
3470
3471 free_hndl:
3472         if (hw_hndl) {
3473                 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3474                                 mcounters->hw_cntrs_hndl);
3475                 mcounters->hw_cntrs_hndl = NULL;
3476         }
3477 free:
3478         kfree(desc_data);
3479         return ret;
3480 }
3481
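     /*
      * On eswitch representors, pin the rule to the rep's vport: via the
      * metadata_reg_c_0 register when vport match metadata is enabled,
      * otherwise via the source_port field.
      */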
3482 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
3483                                          struct mlx5_flow_spec *spec,
3484                                          struct mlx5_eswitch_rep *rep)
3485 {
3486         struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
3487         void *misc;
3488
3489         if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
3490                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3491                                     misc_parameters_2);
3492
3493                 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3494                          mlx5_eswitch_get_vport_metadata_for_match(esw,
3495                                                                    rep->vport));
3496                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3497                                     misc_parameters_2);
3498
3499                 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
3500         } else {
3501                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3502                                     misc_parameters);
3503
3504                 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
3505
3506                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3507                                     misc_parameters);
3508
3509                 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3510         }
3511 }
3512
3513 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3514                                                       struct mlx5_ib_flow_prio *ft_prio,
3515                                                       const struct ib_flow_attr *flow_attr,
3516                                                       struct mlx5_flow_destination *dst,
3517                                                       u32 underlay_qpn,
3518                                                       struct mlx5_ib_create_flow *ucmd)
3519 {
3520         struct mlx5_flow_table  *ft = ft_prio->flow_table;
3521         struct mlx5_ib_flow_handler *handler;
3522         struct mlx5_flow_act flow_act = {};
3523         struct mlx5_flow_spec *spec;
3524         struct mlx5_flow_destination dest_arr[2] = {};
3525         struct mlx5_flow_destination *rule_dst = dest_arr;
3526         const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3527         unsigned int spec_index;
3528         u32 prev_type = 0;
3529         int err = 0;
3530         int dest_num = 0;
3531         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3532
3533         if (!is_valid_attr(dev->mdev, flow_attr))
3534                 return ERR_PTR(-EINVAL);
3535
3536         if (dev->is_rep && is_egress)
3537                 return ERR_PTR(-EINVAL);
3538
3539         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3540         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3541         if (!handler || !spec) {
3542                 err = -ENOMEM;
3543                 goto free;
3544         }
3545
3546         INIT_LIST_HEAD(&handler->list);
3547         if (dst) {
3548                 memcpy(&dest_arr[0], dst, sizeof(*dst));
3549                 dest_num++;
3550         }
3551
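             /* Walk the variable-length spec array that follows the flow
              * attribute, translating each spec into match fields and actions.
              */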
3552         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3553                 err = parse_flow_attr(dev->mdev, spec,
3554                                       ib_flow, flow_attr, &flow_act,
3555                                       prev_type);
3556                 if (err < 0)
3557                         goto free;
3558
3559                 prev_type = ((union ib_flow_spec *)ib_flow)->type;
3560                 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3561         }
3562
3563         if (!flow_is_multicast_only(flow_attr))
3564                 set_underlay_qp(dev, spec, underlay_qpn);
3565
3566         if (dev->is_rep) {
3567                 struct mlx5_eswitch_rep *rep;
3568
3569                 rep = dev->port[flow_attr->port - 1].rep;
3570                 if (!rep) {
3571                         err = -EINVAL;
3572                         goto free;
3573                 }
3574
3575                 mlx5_ib_set_rule_source_port(dev, spec, rep);
3576         }
3577
3578         spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3579
3580         if (is_egress &&
3581             !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3582                 err = -EINVAL;
3583                 goto free;
3584         }
3585
3586         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3587                 struct mlx5_ib_mcounters *mcounters;
3588
3589                 err = flow_counters_set_data(flow_act.counters, ucmd);
3590                 if (err)
3591                         goto free;
3592
3593                 mcounters = to_mcounters(flow_act.counters);
3594                 handler->ibcounters = flow_act.counters;
3595                 dest_arr[dest_num].type =
3596                         MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3597                 dest_arr[dest_num].counter_id =
3598                         mlx5_fc_id(mcounters->hw_cntrs_hndl);
3599                 dest_num++;
3600         }
3601
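             /* A pure drop rule needs no destination; otherwise allow (egress)
              * or forward to the destination list, falling back to the next
              * flow priority when no destination was supplied.
              */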
3602         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3603                 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3604                         rule_dst = NULL;
3605                         dest_num = 0;
3606                 }
3607         } else {
3608                 if (is_egress)
3609                         flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3610                 else
3611                         flow_act.action |=
3612                                 dest_num ?  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3613                                         MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3614         }
3615
3616         if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG)  &&
3617             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3618              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3619                 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
3620                              spec->flow_context.flow_tag, flow_attr->type);
3621                 err = -EINVAL;
3622                 goto free;
3623         }
3624         handler->rule = mlx5_add_flow_rules(ft, spec,
3625                                             &flow_act,
3626                                             rule_dst, dest_num);
3627
3628         if (IS_ERR(handler->rule)) {
3629                 err = PTR_ERR(handler->rule);
3630                 goto free;
3631         }
3632
3633         ft_prio->refcount++;
3634         handler->prio = ft_prio;
3635         handler->dev = dev;
3636
3637         ft_prio->flow_table = ft;
3638 free:
3639         if (err && handler) {
3640                 if (handler->ibcounters &&
3641                     atomic_read(&handler->ibcounters->usecnt) == 1)
3642                         counters_clear_description(handler->ibcounters);
3643                 kfree(handler);
3644         }
3645         kvfree(spec);
3646         return err ? ERR_PTR(err) : handler;
3647 }
3648
3649 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3650                                                      struct mlx5_ib_flow_prio *ft_prio,
3651                                                      const struct ib_flow_attr *flow_attr,
3652                                                      struct mlx5_flow_destination *dst)
3653 {
3654         return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3655 }
3656
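     /*
      * A don't-trap flow is built from two rules sharing the same match: one
      * without a destination, which forwards to the next flow priority, and
      * one steering to the given destination.
      */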
3657 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3658                                                           struct mlx5_ib_flow_prio *ft_prio,
3659                                                           struct ib_flow_attr *flow_attr,
3660                                                           struct mlx5_flow_destination *dst)
3661 {
3662         struct mlx5_ib_flow_handler *handler_dst = NULL;
3663         struct mlx5_ib_flow_handler *handler = NULL;
3664
3665         handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3666         if (!IS_ERR(handler)) {
3667                 handler_dst = create_flow_rule(dev, ft_prio,
3668                                                flow_attr, dst);
3669                 if (IS_ERR(handler_dst)) {
3670                         mlx5_del_flow_rules(handler->rule);
3671                         ft_prio->refcount--;
3672                         kfree(handler);
3673                         handler = handler_dst;
3674                 } else {
3675                         list_add(&handler_dst->list, &handler->list);
3676                 }
3677         }
3678
3679         return handler;
3680 }

3681 enum {
3682         LEFTOVERS_MC,
3683         LEFTOVERS_UC,
3684 };
3685
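     /*
      * Leftovers rules catch traffic not claimed by any other flow: one rule
      * matches the multicast bit of the destination MAC and, for ALL_DEFAULT,
      * a second rule matches unicast (multicast bit clear).
      */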
3686 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3687                                                           struct mlx5_ib_flow_prio *ft_prio,
3688                                                           struct ib_flow_attr *flow_attr,
3689                                                           struct mlx5_flow_destination *dst)
3690 {
3691         struct mlx5_ib_flow_handler *handler_ucast = NULL;
3692         struct mlx5_ib_flow_handler *handler = NULL;
3693
3694         static struct {
3695                 struct ib_flow_attr     flow_attr;
3696                 struct ib_flow_spec_eth eth_flow;
3697         } leftovers_specs[] = {
3698                 [LEFTOVERS_MC] = {
3699                         .flow_attr = {
3700                                 .num_of_specs = 1,
3701                                 .size = sizeof(leftovers_specs[0])
3702                         },
3703                         .eth_flow = {
3704                                 .type = IB_FLOW_SPEC_ETH,
3705                                 .size = sizeof(struct ib_flow_spec_eth),
3706                                 .mask = {.dst_mac = {0x1} },
3707                                 .val =  {.dst_mac = {0x1} }
3708                         }
3709                 },
3710                 [LEFTOVERS_UC] = {
3711                         .flow_attr = {
3712                                 .num_of_specs = 1,
3713                                 .size = sizeof(leftovers_specs[0])
3714                         },
3715                         .eth_flow = {
3716                                 .type = IB_FLOW_SPEC_ETH,
3717                                 .size = sizeof(struct ib_flow_spec_eth),
3718                                 .mask = {.dst_mac = {0x1} },
3719                                 .val = {.dst_mac = {} }
3720                         }
3721                 }
3722         };
3723
3724         handler = create_flow_rule(dev, ft_prio,
3725                                    &leftovers_specs[LEFTOVERS_MC].flow_attr,
3726                                    dst);
3727         if (!IS_ERR(handler) &&
3728             flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3729                 handler_ucast = create_flow_rule(dev, ft_prio,
3730                                                  &leftovers_specs[LEFTOVERS_UC].flow_attr,
3731                                                  dst);
3732                 if (IS_ERR(handler_ucast)) {
3733                         mlx5_del_flow_rules(handler->rule);
3734                         ft_prio->refcount--;
3735                         kfree(handler);
3736                         handler = handler_ucast;
3737                 } else {
3738                         list_add(&handler_ucast->list, &handler->list);
3739                 }
3740         }
3741
3742         return handler;
3743 }
3744
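     /*
      * A sniffer flow is a pair of match-all rules, one in the sniffer RX
      * namespace and one in TX, linked on the handler list so both are torn
      * down together by mlx5_ib_destroy_flow().
      */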
3745 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3746                                                         struct mlx5_ib_flow_prio *ft_rx,
3747                                                         struct mlx5_ib_flow_prio *ft_tx,
3748                                                         struct mlx5_flow_destination *dst)
3749 {
3750         struct mlx5_ib_flow_handler *handler_rx;
3751         struct mlx5_ib_flow_handler *handler_tx;
3752         int err;
3753         static const struct ib_flow_attr flow_attr  = {
3754                 .num_of_specs = 0,
3755                 .size = sizeof(flow_attr)
3756         };
3757
3758         handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3759         if (IS_ERR(handler_rx)) {
3760                 err = PTR_ERR(handler_rx);
3761                 goto err;
3762         }
3763
3764         handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3765         if (IS_ERR(handler_tx)) {
3766                 err = PTR_ERR(handler_tx);
3767                 goto err_tx;
3768         }
3769
3770         list_add(&handler_tx->list, &handler_rx->list);
3771
3772         return handler_rx;
3773
3774 err_tx:
3775         mlx5_del_flow_rules(handler_rx->rule);
3776         ft_rx->refcount--;
3777         kfree(handler_rx);
3778 err:
3779         return ERR_PTR(err);
3780 }
3781
3782 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3783                                            struct ib_flow_attr *flow_attr,
3784                                            int domain,
3785                                            struct ib_udata *udata)
3786 {
3787         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3788         struct mlx5_ib_qp *mqp = to_mqp(qp);
3789         struct mlx5_ib_flow_handler *handler = NULL;
3790         struct mlx5_flow_destination *dst = NULL;
3791         struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3792         struct mlx5_ib_flow_prio *ft_prio;
3793         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3794         struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3795         size_t min_ucmd_sz, required_ucmd_sz;
3796         int err;
3797         int underlay_qpn;
3798
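             /* Parse the optional user command: a fixed header followed by
              * ncounters_data counters-data sections; any trailing udata bytes
              * must be zeroed for forward compatibility.
              */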
3799         if (udata && udata->inlen) {
3800                 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3801                                 sizeof(ucmd_hdr.reserved);
3802                 if (udata->inlen < min_ucmd_sz)
3803                         return ERR_PTR(-EOPNOTSUPP);
3804
3805                 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3806                 if (err)
3807                         return ERR_PTR(err);
3808
3809                 /* currently only one counters data section is supported */
3810                 if (ucmd_hdr.ncounters_data > 1)
3811                         return ERR_PTR(-EINVAL);
3812
3813                 required_ucmd_sz = min_ucmd_sz +
3814                         sizeof(struct mlx5_ib_flow_counters_data) *
3815                         ucmd_hdr.ncounters_data;
3816                 if (udata->inlen > required_ucmd_sz &&
3817                     !ib_is_udata_cleared(udata, required_ucmd_sz,
3818                                          udata->inlen - required_ucmd_sz))
3819                         return ERR_PTR(-EOPNOTSUPP);
3820
3821                 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3822                 if (!ucmd)
3823                         return ERR_PTR(-ENOMEM);
3824
3825                 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3826                 if (err)
3827                         goto free_ucmd;
3828         }
3829
3830         if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3831                 err = -ENOMEM;
3832                 goto free_ucmd;
3833         }
3834
3835         if (domain != IB_FLOW_DOMAIN_USER ||
3836             flow_attr->port > dev->num_ports ||
3837             (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3838                                   IB_FLOW_ATTR_FLAGS_EGRESS))) {
3839                 err = -EINVAL;
3840                 goto free_ucmd;
3841         }
3842
3843         if (is_egress &&
3844             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3845              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3846                 err = -EINVAL;
3847                 goto free_ucmd;
3848         }
3849
3850         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3851         if (!dst) {
3852                 err = -ENOMEM;
3853                 goto free_ucmd;
3854         }
3855
3856         mutex_lock(&dev->flow_db->lock);
3857
3858         ft_prio = get_flow_table(dev, flow_attr,
3859                                  is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3860         if (IS_ERR(ft_prio)) {
3861                 err = PTR_ERR(ft_prio);
3862                 goto unlock;
3863         }
3864         if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3865                 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3866                 if (IS_ERR(ft_prio_tx)) {
3867                         err = PTR_ERR(ft_prio_tx);
3868                         ft_prio_tx = NULL;
3869                         goto destroy_ft;
3870                 }
3871         }
3872
3873         if (is_egress) {
3874                 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3875         } else {
3876                 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3877                 if (mqp->flags & MLX5_IB_QP_RSS)
3878                         dst->tir_num = mqp->rss_qp.tirn;
3879                 else
3880                         dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3881         }
3882
3883         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3884                 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
3885                         handler = create_dont_trap_rule(dev, ft_prio,
3886                                                         flow_attr, dst);
3887                 } else {
3888                         underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3889                                         mqp->underlay_qpn : 0;
3890                         handler = _create_flow_rule(dev, ft_prio, flow_attr,
3891                                                     dst, underlay_qpn, ucmd);
3892                 }
3893         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3894                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3895                 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3896                                                 dst);
3897         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3898                 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3899         } else {
3900                 err = -EINVAL;
3901                 goto destroy_ft;
3902         }
3903
3904         if (IS_ERR(handler)) {
3905                 err = PTR_ERR(handler);
3906                 handler = NULL;
3907                 goto destroy_ft;
3908         }
3909
3910         mutex_unlock(&dev->flow_db->lock);
3911         kfree(dst);
3912         kfree(ucmd);
3913
3914         return &handler->ibflow;
3915
3916 destroy_ft:
3917         put_flow_table(dev, ft_prio, false);
3918         if (ft_prio_tx)
3919                 put_flow_table(dev, ft_prio_tx, false);
3920 unlock:
3921         mutex_unlock(&dev->flow_db->lock);
3922         kfree(dst);
3923 free_ucmd:
3924         kfree(ucmd);
3925         return ERR_PTR(err);
3926 }
3927
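     /*
      * Resolve the flow-table priority for a raw-flow matcher, sizing the
      * table from the namespace's log_max_ft_size capability and enabling
      * tunnel decap/reformat where device capabilities and the eswitch encap
      * mode allow it.
      */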
3928 static struct mlx5_ib_flow_prio *
3929 _get_flow_table(struct mlx5_ib_dev *dev,
3930                 struct mlx5_ib_flow_matcher *fs_matcher,
3931                 bool mcast)
3932 {
3933         struct mlx5_flow_namespace *ns = NULL;
3934         struct mlx5_ib_flow_prio *prio = NULL;
3935         int max_table_size = 0;
3936         bool esw_encap;
3937         u32 flags = 0;
3938         int priority;
3939
3940         if (mcast)
3941                 priority = MLX5_IB_FLOW_MCAST_PRIO;
3942         else
3943                 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3944
3945         esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
3946                 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
3947         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3948                 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3949                                         log_max_ft_size));
3950                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
3951                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3952                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3953                                               reformat_l3_tunnel_to_l2) &&
3954                     !esw_encap)
3955                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3956         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
3957                 max_table_size = BIT(
3958                         MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
3959                 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
3960                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3961         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3962                 max_table_size = BIT(
3963                         MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
3964                 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
3965                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3966                 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
3967                     esw_encap)
3968                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3969                 priority = FDB_BYPASS_PATH;
3970         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
3971                 max_table_size =
3972                         BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
3973                                                        log_max_ft_size));
3974                 priority = fs_matcher->priority;
3975         }
3976
3977         max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
3978
3979         ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3980         if (!ns)
3981                 return ERR_PTR(-ENOTSUPP);
3982
3983         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3984                 prio = &dev->flow_db->prios[priority];
3985         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
3986                 prio = &dev->flow_db->egress_prios[priority];
3987         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
3988                 prio = &dev->flow_db->fdb;
3989         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
3990                 prio = &dev->flow_db->rdma_rx[priority];
3991
3992         if (!prio)
3993                 return ERR_PTR(-EINVAL);
3994
3995         if (prio->flow_table)
3996                 return prio;
3997
3998         return _get_prio(ns, prio, priority, max_table_size,
3999                          MLX5_FS_MAX_TYPES, flags);
4000 }
4001
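/*
 * Install a single raw flow rule: the command buffer supplies the match
 * value, while the matcher supplies the match criteria (mask) and the
 * criteria-enable bits. On success the handler takes a reference on
 * ft_prio; on failure the handler is freed and an ERR_PTR is returned.
 * The temporary spec is freed on both paths.
 */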
4002 static struct mlx5_ib_flow_handler *
4003 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
4004                       struct mlx5_ib_flow_prio *ft_prio,
4005                       struct mlx5_flow_destination *dst,
4006                       struct mlx5_ib_flow_matcher  *fs_matcher,
4007                       struct mlx5_flow_context *flow_context,
4008                       struct mlx5_flow_act *flow_act,
4009                       void *cmd_in, int inlen,
4010                       int dst_num)
4011 {
4012         struct mlx5_ib_flow_handler *handler;
4013         struct mlx5_flow_spec *spec;
4014         struct mlx5_flow_table *ft = ft_prio->flow_table;
4015         int err = 0;
4016
4017         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
4018         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
4019         if (!handler || !spec) {
4020                 err = -ENOMEM;
4021                 goto free;
4022         }
4023
4024         INIT_LIST_HEAD(&handler->list);
4025
4026         memcpy(spec->match_value, cmd_in, inlen);
4027         memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
4028                fs_matcher->mask_len);
4029         spec->match_criteria_enable = fs_matcher->match_criteria_enable;
4030         spec->flow_context = *flow_context;
4031
4032         handler->rule = mlx5_add_flow_rules(ft, spec,
4033                                             flow_act, dst, dst_num);
4034
4035         if (IS_ERR(handler->rule)) {
4036                 err = PTR_ERR(handler->rule);
4037                 goto free;
4038         }
4039
4040         ft_prio->refcount++;
4041         handler->prio = ft_prio;
4042         handler->dev = dev;
4043         ft_prio->flow_table = ft;
4044
4045 free:
4046         if (err)
4047                 kfree(handler);
4048         kvfree(spec);
4049         return err ? ERR_PTR(err) : handler;
4050 }
4051
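/*
 * A raw flow counts as multicast only when the outer-headers criteria are
 * enabled and either the destination MAC or the destination IPv4 address
 * is multicast in both the match value and the mask.
 */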
4052 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
4053                                 void *match_v)
4054 {
4055         void *match_c;
4056         void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
4057         void *dmac, *dmac_mask;
4058         void *ipv4, *ipv4_mask;
4059
4060         if (!(fs_matcher->match_criteria_enable &
4061               (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
4062                 return false;
4063
4064         match_c = fs_matcher->matcher_mask.match_params;
4065         match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
4066                                            outer_headers);
4067         match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
4068                                            outer_headers);
4069
4070         dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4071                             dmac_47_16);
4072         dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4073                                  dmac_47_16);
4074
4075         if (is_multicast_ether_addr(dmac) &&
4076             is_multicast_ether_addr(dmac_mask))
4077                 return true;
4078
4079         ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
4080                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4081
4082         ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
4083                                  dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4084
4085         if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
4086             ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
4087                 return true;
4088
4089         return false;
4090 }
4091
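/*
 * Add a raw (matcher-based) steering rule. Up to two destinations are
 * built: the forward destination (TIR, flow table number, or the uplink
 * port for allow rules) plus, when MLX5_FLOW_CONTEXT_ACTION_COUNT is set,
 * a flow counter. A caller sketch (hypothetical values, for illustration
 * only):
 *
 *	handler = mlx5_ib_raw_fs_rule_add(dev, matcher, &flow_context,
 *					  &flow_act, counter_id, cmd_in,
 *					  inlen, tirn,
 *					  MLX5_FLOW_DESTINATION_TYPE_TIR);
 *	if (IS_ERR(handler))
 *		err = PTR_ERR(handler);
 */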
4092 struct mlx5_ib_flow_handler *
4093 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
4094                         struct mlx5_ib_flow_matcher *fs_matcher,
4095                         struct mlx5_flow_context *flow_context,
4096                         struct mlx5_flow_act *flow_act,
4097                         u32 counter_id,
4098                         void *cmd_in, int inlen, int dest_id,
4099                         int dest_type)
4100 {
4101         struct mlx5_flow_destination *dst;
4102         struct mlx5_ib_flow_prio *ft_prio;
4103         struct mlx5_ib_flow_handler *handler;
4104         int dst_num = 0;
4105         bool mcast;
4106         int err;
4107
4108         if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
4109                 return ERR_PTR(-EOPNOTSUPP);
4110
4111         if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
4112                 return ERR_PTR(-ENOMEM);
4113
4114         dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
4115         if (!dst)
4116                 return ERR_PTR(-ENOMEM);
4117
4118         mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
4119         mutex_lock(&dev->flow_db->lock);
4120
4121         ft_prio = _get_flow_table(dev, fs_matcher, mcast);
4122         if (IS_ERR(ft_prio)) {
4123                 err = PTR_ERR(ft_prio);
4124                 goto unlock;
4125         }
4126
4127         if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
4128                 dst[dst_num].type = dest_type;
4129                 dst[dst_num].tir_num = dest_id;
4130                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4131         } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
4132                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
4133                 dst[dst_num].ft_num = dest_id;
4134                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4135         } else {
4136                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
4137                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
4138         }
4139
4140         dst_num++;
4141
4142         if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
4143                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
4144                 dst[dst_num].counter_id = counter_id;
4145                 dst_num++;
4146         }
4147
4148         handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
4149                                         flow_context, flow_act,
4150                                         cmd_in, inlen, dst_num);
4151
4152         if (IS_ERR(handler)) {
4153                 err = PTR_ERR(handler);
4154                 goto destroy_ft;
4155         }
4156
4157         mutex_unlock(&dev->flow_db->lock);
4158         atomic_inc(&fs_matcher->usecnt);
4159         handler->flow_matcher = fs_matcher;
4160
4161         kfree(dst);
4162
4163         return handler;
4164
4165 destroy_ft:
4166         put_flow_table(dev, ft_prio, false);
4167 unlock:
4168         mutex_unlock(&dev->flow_db->lock);
4169         kfree(dst);
4170
4171         return ERR_PTR(err);
4172 }
4173
4174 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
4175 {
4176         u32 flags = 0;
4177
4178         if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
4179                 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
4180
4181         return flags;
4182 }
4183
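/*
 * ESP flow action creation. The aes_gcm key_len and icv_len are supplied
 * in bytes and converted to bits for the accel layer (hence the "* 8"
 * below); only the AES_GCM keymat with a 16-byte ICV and the SEQ IV
 * algorithm is accepted.
 */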
4184 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED      MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
4185 static struct ib_flow_action *
4186 mlx5_ib_create_flow_action_esp(struct ib_device *device,
4187                                const struct ib_flow_action_attrs_esp *attr,
4188                                struct uverbs_attr_bundle *attrs)
4189 {
4190         struct mlx5_ib_dev *mdev = to_mdev(device);
4191         struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
4192         struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
4193         struct mlx5_ib_flow_action *action;
4194         u64 action_flags;
4195         u64 flags;
4196         int err = 0;
4197
4198         err = uverbs_get_flags64(
4199                 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4200                 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
4201         if (err)
4202                 return ERR_PTR(err);
4203
4204         flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
4205
4206         /* We currently support only a subset of the standard features:
4207          * a keymat of type AES_GCM with icv_len == 16, iv_algo == SEQ and
4208          * ESN (with overlap). Full offload mode isn't supported.
4209          */
4210         if (!attr->keymat || attr->replay || attr->encap ||
4211             attr->spi || attr->seq || attr->tfc_pad ||
4212             attr->hard_limit_pkts ||
4213             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4214                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
4215                 return ERR_PTR(-EOPNOTSUPP);
4216
4217         if (attr->keymat->protocol !=
4218             IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
4219                 return ERR_PTR(-EOPNOTSUPP);
4220
4221         aes_gcm = &attr->keymat->keymat.aes_gcm;
4222
4223         if (aes_gcm->icv_len != 16 ||
4224             aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
4225                 return ERR_PTR(-EOPNOTSUPP);
4226
4227         action = kmalloc(sizeof(*action), GFP_KERNEL);
4228         if (!action)
4229                 return ERR_PTR(-ENOMEM);
4230
4231         action->esp_aes_gcm.ib_flags = attr->flags;
4232         memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
4233                sizeof(accel_attrs.keymat.aes_gcm.aes_key));
4234         accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
4235         memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
4236                sizeof(accel_attrs.keymat.aes_gcm.salt));
4237         memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
4238                sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
4239         accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
4240         accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
4241         accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
4242
4243         accel_attrs.esn = attr->esn;
4244         if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
4245                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
4246         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4247                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4248
4249         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
4250                 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
4251
4252         action->esp_aes_gcm.ctx =
4253                 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
4254         if (IS_ERR(action->esp_aes_gcm.ctx)) {
4255                 err = PTR_ERR(action->esp_aes_gcm.ctx);
4256                 goto err_parse;
4257         }
4258
4259         action->esp_aes_gcm.ib_flags = attr->flags;
4260
4261         return &action->ib_action;
4262
4263 err_parse:
4264         kfree(action);
4265         return ERR_PTR(err);
4266 }
4267
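/*
 * Modify an existing ESP flow action: the current xfrm attributes are
 * copied, only the ESN value and the ESN-overlap window flag are changed,
 * and the result is applied through mlx5_accel_esp_modify_xfrm().
 */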
4268 static int
4269 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
4270                                const struct ib_flow_action_attrs_esp *attr,
4271                                struct uverbs_attr_bundle *attrs)
4272 {
4273         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4274         struct mlx5_accel_esp_xfrm_attrs accel_attrs;
4275         int err = 0;
4276
4277         if (attr->keymat || attr->replay || attr->encap ||
4278             attr->spi || attr->seq || attr->tfc_pad ||
4279             attr->hard_limit_pkts ||
4280             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4281                              IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
4282                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
4283                 return -EOPNOTSUPP;
4284
4285         /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
4286          * be modified.
4287          */
4288         if (!(maction->esp_aes_gcm.ib_flags &
4289               IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
4290             attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4291                            IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4292                 return -EINVAL;
4293
4294         memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4295                sizeof(accel_attrs));
4296
4297         accel_attrs.esn = attr->esn;
4298         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4299                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4300         else
4301                 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4302
4303         err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4304                                          &accel_attrs);
4305         if (err)
4306                 return err;
4307
4308         maction->esp_aes_gcm.ib_flags &=
4309                 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4310         maction->esp_aes_gcm.ib_flags |=
4311                 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4312
4313         return 0;
4314 }
4315
4316 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4317 {
4318         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4319
4320         switch (action->type) {
4321         case IB_FLOW_ACTION_ESP:
4322                 /*
4323                  * We only support aes_gcm for now, so we implicitly know
4324                  * this is the underlying crypto.
4325                  */
4326                 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4327                 break;
4328         case IB_FLOW_ACTION_UNSPECIFIED:
4329                 mlx5_ib_destroy_flow_action_raw(maction);
4330                 break;
4331         default:
4332                 WARN_ON(true);
4333                 break;
4334         }
4335
4336         kfree(maction);
4337         return 0;
4338 }
4339
4340 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4341 {
4342         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4343         struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4344         int err;
4345         u16 uid;
4346
4347         uid = ibqp->pd ?
4348                 to_mpd(ibqp->pd)->uid : 0;
4349
4350         if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4351                 mlx5_ib_dbg(dev, "Attaching a multicast group to underlay QP is not supported\n");
4352                 return -EOPNOTSUPP;
4353         }
4354
4355         err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4356         if (err)
4357                 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4358                              ibqp->qp_num, gid->raw);
4359
4360         return err;
4361 }
4362
4363 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4364 {
4365         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4366         int err;
4367         u16 uid;
4368
4369         uid = ibqp->pd ?
4370                 to_mpd(ibqp->pd)->uid : 0;
4371         err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4372         if (err)
4373                 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4374                              ibqp->qp_num, gid->raw);
4375
4376         return err;
4377 }
4378
4379 static int init_node_data(struct mlx5_ib_dev *dev)
4380 {
4381         int err;
4382
4383         err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
4384         if (err)
4385                 return err;
4386
4387         dev->mdev->rev_id = dev->mdev->pdev->revision;
4388
4389         return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
4390 }
4391
4392 static ssize_t fw_pages_show(struct device *device,
4393                              struct device_attribute *attr, char *buf)
4394 {
4395         struct mlx5_ib_dev *dev =
4396                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4397
4398         return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
4399 }
4400 static DEVICE_ATTR_RO(fw_pages);
4401
4402 static ssize_t reg_pages_show(struct device *device,
4403                               struct device_attribute *attr, char *buf)
4404 {
4405         struct mlx5_ib_dev *dev =
4406                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4407
4408         return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
4409 }
4410 static DEVICE_ATTR_RO(reg_pages);
4411
4412 static ssize_t hca_type_show(struct device *device,
4413                              struct device_attribute *attr, char *buf)
4414 {
4415         struct mlx5_ib_dev *dev =
4416                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4417
4418         return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
4419 }
4420 static DEVICE_ATTR_RO(hca_type);
4421
4422 static ssize_t hw_rev_show(struct device *device,
4423                            struct device_attribute *attr, char *buf)
4424 {
4425         struct mlx5_ib_dev *dev =
4426                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4427
4428         return sprintf(buf, "%x\n", dev->mdev->rev_id);
4429 }
4430 static DEVICE_ATTR_RO(hw_rev);
4431
4432 static ssize_t board_id_show(struct device *device,
4433                              struct device_attribute *attr, char *buf)
4434 {
4435         struct mlx5_ib_dev *dev =
4436                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4437
4438         return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
4439                        dev->mdev->board_id);
4440 }
4441 static DEVICE_ATTR_RO(board_id);
4442
4443 static struct attribute *mlx5_class_attributes[] = {
4444         &dev_attr_hw_rev.attr,
4445         &dev_attr_hca_type.attr,
4446         &dev_attr_board_id.attr,
4447         &dev_attr_fw_pages.attr,
4448         &dev_attr_reg_pages.attr,
4449         NULL,
4450 };
4451
4452 static const struct attribute_group mlx5_attr_group = {
4453         .attrs = mlx5_class_attributes,
4454 };
4455
4456 static void pkey_change_handler(struct work_struct *work)
4457 {
4458         struct mlx5_ib_port_resources *ports =
4459                 container_of(work, struct mlx5_ib_port_resources,
4460                              pkey_change_work);
4461
4462         mutex_lock(&ports->devr->mutex);
4463         mlx5_ib_gsi_pkey_change(ports->gsi);
4464         mutex_unlock(&ports->devr->mutex);
4465 }
4466
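/*
 * On a fatal device error, walk every QP on this ibdev (under the reset
 * flow lock, so this cannot race with QP create/destroy) and collect each
 * completion-enabled CQ that still has outstanding work into
 * cq_armed_list; their completion handlers are then invoked so consumers
 * can observe the flushed-in-error completions.
 */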
4467 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4468 {
4469         struct mlx5_ib_qp *mqp;
4470         struct mlx5_ib_cq *send_mcq, *recv_mcq;
4471         struct mlx5_core_cq *mcq;
4472         struct list_head cq_armed_list;
4473         unsigned long flags_qp;
4474         unsigned long flags_cq;
4475         unsigned long flags;
4476
4477         INIT_LIST_HEAD(&cq_armed_list);
4478
4479         /* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
4480         spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4481         list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4482                 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4483                 if (mqp->sq.tail != mqp->sq.head) {
4484                         send_mcq = to_mcq(mqp->ibqp.send_cq);
4485                         spin_lock_irqsave(&send_mcq->lock, flags_cq);
4486                         if (send_mcq->mcq.comp &&
4487                             mqp->ibqp.send_cq->comp_handler) {
4488                                 if (!send_mcq->mcq.reset_notify_added) {
4489                                         send_mcq->mcq.reset_notify_added = 1;
4490                                         list_add_tail(&send_mcq->mcq.reset_notify,
4491                                                       &cq_armed_list);
4492                                 }
4493                         }
4494                         spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4495                 }
4496                 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4497                 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4498                 /* no handling is needed for SRQ */
4499                 if (!mqp->ibqp.srq) {
4500                         if (mqp->rq.tail != mqp->rq.head) {
4501                                 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4502                                 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4503                                 if (recv_mcq->mcq.comp &&
4504                                     mqp->ibqp.recv_cq->comp_handler) {
4505                                         if (!recv_mcq->mcq.reset_notify_added) {
4506                                                 recv_mcq->mcq.reset_notify_added = 1;
4507                                                 list_add_tail(&recv_mcq->mcq.reset_notify,
4508                                                               &cq_armed_list);
4509                                         }
4510                                 }
4511                                 spin_unlock_irqrestore(&recv_mcq->lock,
4512                                                        flags_cq);
4513                         }
4514                 }
4515                 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4516         }
4517         /* At this point all inflight post sends were put up for execution,
4518          * as we locked/unlocked the locks above. Now arm all involved CQs.
4519          */
4520         list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4521                 mcq->comp(mcq, NULL);
4522         }
4523         spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4524 }
4525
4526 static void delay_drop_handler(struct work_struct *work)
4527 {
4528         int err;
4529         struct mlx5_ib_delay_drop *delay_drop =
4530                 container_of(work, struct mlx5_ib_delay_drop,
4531                              delay_drop_work);
4532
4533         atomic_inc(&delay_drop->events_cnt);
4534
4535         mutex_lock(&delay_drop->lock);
4536         err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4537                                        delay_drop->timeout);
4538         if (err) {
4539                 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4540                              delay_drop->timeout);
4541                 delay_drop->activate = false;
4542         }
4543         mutex_unlock(&delay_drop->lock);
4544 }
4545
4546 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4547                                  struct ib_event *ibev)
4548 {
4549         u8 port = (eqe->data.port.port >> 4) & 0xf;
4550
4551         switch (eqe->sub_type) {
4552         case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4553                 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4554                                             IB_LINK_LAYER_ETHERNET)
4555                         schedule_work(&ibdev->delay_drop.delay_drop_work);
4556                 break;
4557         default: /* do nothing */
4558                 return;
4559         }
4560 }
4561
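/*
 * Translate a firmware port-change EQE into an ib_event. The port number
 * is carried in the upper nibble of eqe->data.port.port. Returns -EINVAL
 * for subtypes that must not be dispatched, including up/down events on
 * Ethernet ports, which mlx5_netdev_event() already handles.
 */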
4562 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4563                               struct ib_event *ibev)
4564 {
4565         u8 port = (eqe->data.port.port >> 4) & 0xf;
4566
4567         ibev->element.port_num = port;
4568
4569         switch (eqe->sub_type) {
4570         case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4571         case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4572         case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4573                 /* In RoCE, port up/down events are handled in
4574                  * mlx5_netdev_event().
4575                  */
4576                 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4577                                             IB_LINK_LAYER_ETHERNET)
4578                         return -EINVAL;
4579
4580                 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4581                                 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4582                 break;
4583
4584         case MLX5_PORT_CHANGE_SUBTYPE_LID:
4585                 ibev->event = IB_EVENT_LID_CHANGE;
4586                 break;
4587
4588         case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4589                 ibev->event = IB_EVENT_PKEY_CHANGE;
4590                 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4591                 break;
4592
4593         case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4594                 ibev->event = IB_EVENT_GID_CHANGE;
4595                 break;
4596
4597         case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4598                 ibev->event = IB_EVENT_CLIENT_REREGISTER;
4599                 break;
4600         default:
4601                 return -EINVAL;
4602         }
4603
4604         return 0;
4605 }
4606
4607 static void mlx5_ib_handle_event(struct work_struct *_work)
4608 {
4609         struct mlx5_ib_event_work *work =
4610                 container_of(_work, struct mlx5_ib_event_work, work);
4611         struct mlx5_ib_dev *ibdev;
4612         struct ib_event ibev;
4613         bool fatal = false;
4614
4615         if (work->is_slave) {
4616                 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
4617                 if (!ibdev)
4618                         goto out;
4619         } else {
4620                 ibdev = work->dev;
4621         }
4622
4623         switch (work->event) {
4624         case MLX5_DEV_EVENT_SYS_ERROR:
4625                 ibev.event = IB_EVENT_DEVICE_FATAL;
4626                 mlx5_ib_handle_internal_error(ibdev);
4627                 ibev.element.port_num  = (u8)(unsigned long)work->param;
4628                 fatal = true;
4629                 break;
4630         case MLX5_EVENT_TYPE_PORT_CHANGE:
4631                 if (handle_port_change(ibdev, work->param, &ibev))
4632                         goto out;
4633                 break;
4634         case MLX5_EVENT_TYPE_GENERAL_EVENT:
4635                 handle_general_event(ibdev, work->param, &ibev);
4636                 /* fall through */
4637         default:
4638                 goto out;
4639         }
4640
4641         ibev.device = &ibdev->ib_dev;
4642
4643         if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4644                 mlx5_ib_warn(ibdev, "warning: event on port %d\n",  ibev.element.port_num);
4645                 goto out;
4646         }
4647
4648         if (ibdev->ib_active)
4649                 ib_dispatch_event(&ibev);
4650
4651         if (fatal)
4652                 ibdev->ib_active = false;
4653 out:
4654         kfree(work);
4655 }
4656
4657 static int mlx5_ib_event(struct notifier_block *nb,
4658                          unsigned long event, void *param)
4659 {
4660         struct mlx5_ib_event_work *work;
4661
4662         work = kmalloc(sizeof(*work), GFP_ATOMIC);
4663         if (!work)
4664                 return NOTIFY_DONE;
4665
4666         INIT_WORK(&work->work, mlx5_ib_handle_event);
4667         work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4668         work->is_slave = false;
4669         work->param = param;
4670         work->event = event;
4671
4672         queue_work(mlx5_ib_event_wq, &work->work);
4673
4674         return NOTIFY_OK;
4675 }
4676
4677 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4678                                     unsigned long event, void *param)
4679 {
4680         struct mlx5_ib_event_work *work;
4681
4682         work = kmalloc(sizeof(*work), GFP_ATOMIC);
4683         if (!work)
4684                 return NOTIFY_DONE;
4685
4686         INIT_WORK(&work->work, mlx5_ib_handle_event);
4687         work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4688         work->is_slave = true;
4689         work->param = param;
4690         work->event = event;
4691         queue_work(mlx5_ib_event_wq, &work->work);
4692
4693         return NOTIFY_OK;
4694 }
4695
4696 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4697 {
4698         struct mlx5_hca_vport_context vport_ctx;
4699         int err;
4700         int port;
4701
4702         for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
4703                 dev->mdev->port_caps[port - 1].has_smi = false;
4704                 if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4705                     MLX5_CAP_PORT_TYPE_IB) {
4706                         if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4707                                 err = mlx5_query_hca_vport_context(dev->mdev, 0,
4708                                                                    port, 0,
4709                                                                    &vport_ctx);
4710                                 if (err) {
4711                                         mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4712                                                     port, err);
4713                                         return err;
4714                                 }
4715                                 dev->mdev->port_caps[port - 1].has_smi =
4716                                         vport_ctx.has_smi;
4717                         } else {
4718                                 dev->mdev->port_caps[port - 1].has_smi = true;
4719                         }
4720                 }
4721         }
4722         return 0;
4723 }
4724
4725 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4726 {
4727         int port;
4728
4729         for (port = 1; port <= dev->num_ports; port++)
4730                 mlx5_query_ext_port_caps(dev, port);
4731 }
4732
4733 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4734 {
4735         struct ib_device_attr *dprops = NULL;
4736         struct ib_port_attr *pprops = NULL;
4737         int err = -ENOMEM;
4738         struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4739
4740         pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
4741         if (!pprops)
4742                 goto out;
4743
4744         dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4745         if (!dprops)
4746                 goto out;
4747
4748         err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4749         if (err) {
4750                 mlx5_ib_warn(dev, "query_device failed %d\n", err);
4751                 goto out;
4752         }
4753
4754         err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4755         if (err) {
4756                 mlx5_ib_warn(dev, "query_port %d failed %d\n",
4757                              port, err);
4758                 goto out;
4759         }
4760
4761         dev->mdev->port_caps[port - 1].pkey_table_len =
4762                                         dprops->max_pkeys;
4763         dev->mdev->port_caps[port - 1].gid_table_len =
4764                                         pprops->gid_tbl_len;
4765         mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4766                     port, dprops->max_pkeys, pprops->gid_tbl_len);
4767
4768 out:
4769         kfree(pprops);
4770         kfree(dprops);
4771
4772         return err;
4773 }
4774
4775 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4776 {
4777         /* For representors use port 1, since this is the only native
4778          * port.
4779          */
4780         if (dev->is_rep)
4781                 return __get_port_caps(dev, 1);
4782         return __get_port_caps(dev, port);
4783 }
4784
4785 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4786 {
4787         int err;
4788
4789         err = mlx5_mr_cache_cleanup(dev);
4790         if (err)
4791                 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4792
4793         if (dev->umrc.qp)
4794                 mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
4795         if (dev->umrc.cq)
4796                 ib_free_cq(dev->umrc.cq);
4797         if (dev->umrc.pd)
4798                 ib_dealloc_pd(dev->umrc.pd);
4799 }
4800
4801 enum {
4802         MAX_UMR_WR = 128,
4803 };
4804
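/*
 * Create the resources backing UMR (user-mode memory registration) work:
 * a PD, a softirq-polled CQ and a driver-private REG_UMR QP, which is
 * walked through the INIT -> RTR -> RTS transitions before the MR cache
 * is initialized. The semaphore caps outstanding UMR WRs at MAX_UMR_WR.
 */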
4805 static int create_umr_res(struct mlx5_ib_dev *dev)
4806 {
4807         struct ib_qp_init_attr *init_attr = NULL;
4808         struct ib_qp_attr *attr = NULL;
4809         struct ib_pd *pd;
4810         struct ib_cq *cq;
4811         struct ib_qp *qp;
4812         int ret;
4813
4814         attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4815         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4816         if (!attr || !init_attr) {
4817                 ret = -ENOMEM;
4818                 goto error_0;
4819         }
4820
4821         pd = ib_alloc_pd(&dev->ib_dev, 0);
4822         if (IS_ERR(pd)) {
4823                 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4824                 ret = PTR_ERR(pd);
4825                 goto error_0;
4826         }
4827
4828         cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4829         if (IS_ERR(cq)) {
4830                 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4831                 ret = PTR_ERR(cq);
4832                 goto error_2;
4833         }
4834
4835         init_attr->send_cq = cq;
4836         init_attr->recv_cq = cq;
4837         init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4838         init_attr->cap.max_send_wr = MAX_UMR_WR;
4839         init_attr->cap.max_send_sge = 1;
4840         init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4841         init_attr->port_num = 1;
4842         qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4843         if (IS_ERR(qp)) {
4844                 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4845                 ret = PTR_ERR(qp);
4846                 goto error_3;
4847         }
4848         qp->device     = &dev->ib_dev;
4849         qp->real_qp    = qp;
4850         qp->uobject    = NULL;
4851         qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4852         qp->send_cq    = init_attr->send_cq;
4853         qp->recv_cq    = init_attr->recv_cq;
4854
4855         attr->qp_state = IB_QPS_INIT;
4856         attr->port_num = 1;
4857         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4858                                 IB_QP_PORT, NULL);
4859         if (ret) {
4860                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4861                 goto error_4;
4862         }
4863
4864         memset(attr, 0, sizeof(*attr));
4865         attr->qp_state = IB_QPS_RTR;
4866         attr->path_mtu = IB_MTU_256;
4867
4868         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4869         if (ret) {
4870                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4871                 goto error_4;
4872         }
4873
4874         memset(attr, 0, sizeof(*attr));
4875         attr->qp_state = IB_QPS_RTS;
4876         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4877         if (ret) {
4878                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4879                 goto error_4;
4880         }
4881
4882         dev->umrc.qp = qp;
4883         dev->umrc.cq = cq;
4884         dev->umrc.pd = pd;
4885
4886         sema_init(&dev->umrc.sem, MAX_UMR_WR);
4887         ret = mlx5_mr_cache_init(dev);
4888         if (ret) {
4889                 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4890                 goto error_4;
4891         }
4892
4893         kfree(attr);
4894         kfree(init_attr);
4895
4896         return 0;
4897
4898 error_4:
4899         mlx5_ib_destroy_qp(qp, NULL);
4900         dev->umrc.qp = NULL;
4901
4902 error_3:
4903         ib_free_cq(cq);
4904         dev->umrc.cq = NULL;
4905
4906 error_2:
4907         ib_dealloc_pd(pd);
4908         dev->umrc.pd = NULL;
4909
4910 error_0:
4911         kfree(attr);
4912         kfree(init_attr);
4913         return ret;
4914 }
4915
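/*
 * Pick the weakest fence mode the device's UMR fence capability permits;
 * any unrecognized capability falls back to strong ordering, the safest
 * choice.
 */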
4916 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4917 {
4918         switch (umr_fence_cap) {
4919         case MLX5_CAP_UMR_FENCE_NONE:
4920                 return MLX5_FENCE_MODE_NONE;
4921         case MLX5_CAP_UMR_FENCE_SMALL:
4922                 return MLX5_FENCE_MODE_INITIATOR_SMALL;
4923         default:
4924                 return MLX5_FENCE_MODE_STRONG_ORDERING;
4925         }
4926 }
4927
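/*
 * Allocate the driver-internal default objects shared across the device:
 * PD p0, CQ c0, XRC domains x0/x1, XRC SRQ s0 and basic SRQ s1. The error
 * path below unwinds in strict reverse order of creation.
 */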
4928 static int create_dev_resources(struct mlx5_ib_resources *devr)
4929 {
4930         struct ib_srq_init_attr attr;
4931         struct mlx5_ib_dev *dev;
4932         struct ib_device *ibdev;
4933         struct ib_cq_init_attr cq_attr = {.cqe = 1};
4934         int port;
4935         int ret = 0;
4936
4937         dev = container_of(devr, struct mlx5_ib_dev, devr);
4938         ibdev = &dev->ib_dev;
4939
4940         mutex_init(&devr->mutex);
4941
4942         devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
4943         if (!devr->p0)
4944                 return -ENOMEM;
4945
4946         devr->p0->device  = ibdev;
4947         devr->p0->uobject = NULL;
4948         atomic_set(&devr->p0->usecnt, 0);
4949
4950         ret = mlx5_ib_alloc_pd(devr->p0, NULL);
4951         if (ret)
4952                 goto error0;
4953
4954         devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
4955         if (!devr->c0) {
4956                 ret = -ENOMEM;
4957                 goto error1;
4958         }
4959
4960         devr->c0->device = &dev->ib_dev;
4961         atomic_set(&devr->c0->usecnt, 0);
4962
4963         ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
4964         if (ret)
4965                 goto err_create_cq;
4966
4967         devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4968         if (IS_ERR(devr->x0)) {
4969                 ret = PTR_ERR(devr->x0);
4970                 goto error2;
4971         }
4972         devr->x0->device = &dev->ib_dev;
4973         devr->x0->inode = NULL;
4974         atomic_set(&devr->x0->usecnt, 0);
4975         mutex_init(&devr->x0->tgt_qp_mutex);
4976         INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4977
4978         devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
4979         if (IS_ERR(devr->x1)) {
4980                 ret = PTR_ERR(devr->x1);
4981                 goto error3;
4982         }
4983         devr->x1->device = &dev->ib_dev;
4984         devr->x1->inode = NULL;
4985         atomic_set(&devr->x1->usecnt, 0);
4986         mutex_init(&devr->x1->tgt_qp_mutex);
4987         INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4988
4989         memset(&attr, 0, sizeof(attr));
4990         attr.attr.max_sge = 1;
4991         attr.attr.max_wr = 1;
4992         attr.srq_type = IB_SRQT_XRC;
4993         attr.ext.cq = devr->c0;
4994         attr.ext.xrc.xrcd = devr->x0;
4995
4996         devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
4997         if (!devr->s0) {
4998                 ret = -ENOMEM;
4999                 goto error4;
5000         }
5001
5002         devr->s0->device        = &dev->ib_dev;
5003         devr->s0->pd            = devr->p0;
5004         devr->s0->srq_type      = IB_SRQT_XRC;
5005         devr->s0->ext.xrc.xrcd  = devr->x0;
5006         devr->s0->ext.cq        = devr->c0;
5007         ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
5008         if (ret)
5009                 goto err_create;
5010
5011         atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
5012         atomic_inc(&devr->s0->ext.cq->usecnt);
5013         atomic_inc(&devr->p0->usecnt);
5014         atomic_set(&devr->s0->usecnt, 0);
5015
5016         memset(&attr, 0, sizeof(attr));
5017         attr.attr.max_sge = 1;
5018         attr.attr.max_wr = 1;
5019         attr.srq_type = IB_SRQT_BASIC;
5020         devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5021         if (!devr->s1) {
5022                 ret = -ENOMEM;
5023                 goto error5;
5024         }
5025
5026         devr->s1->device        = &dev->ib_dev;
5027         devr->s1->pd            = devr->p0;
5028         devr->s1->srq_type      = IB_SRQT_BASIC;
5029         devr->s1->ext.cq        = devr->c0;
5030
5031         ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
5032         if (ret)
5033                 goto error6;
5034
5035         atomic_inc(&devr->p0->usecnt);
5036         atomic_set(&devr->s1->usecnt, 0);
5037
5038         for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
5039                 INIT_WORK(&devr->ports[port].pkey_change_work,
5040                           pkey_change_handler);
5041                 devr->ports[port].devr = devr;
5042         }
5043
5044         return 0;
5045
5046 error6:
5047         kfree(devr->s1);
5048 error5:
5049         mlx5_ib_destroy_srq(devr->s0, NULL);
5050 err_create:
5051         kfree(devr->s0);
5052 error4:
5053         mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5054 error3:
5055         mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5056 error2:
5057         mlx5_ib_destroy_cq(devr->c0, NULL);
5058 err_create_cq:
5059         kfree(devr->c0);
5060 error1:
5061         mlx5_ib_dealloc_pd(devr->p0, NULL);
5062 error0:
5063         kfree(devr->p0);
5064         return ret;
5065 }
5066
5067 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
5068 {
5069         int port;
5070
5071         mlx5_ib_destroy_srq(devr->s1, NULL);
5072         kfree(devr->s1);
5073         mlx5_ib_destroy_srq(devr->s0, NULL);
5074         kfree(devr->s0);
5075         mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5076         mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5077         mlx5_ib_destroy_cq(devr->c0, NULL);
5078         kfree(devr->c0);
5079         mlx5_ib_dealloc_pd(devr->p0, NULL);
5080         kfree(devr->p0);
5081
5082         /* Make sure no P_Key change work items are still executing */
5083         for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
5084                 cancel_work_sync(&devr->ports[port].pkey_change_work);
5085 }
5086
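/*
 * Compute the RDMA core port capabilities. An IB link layer short-circuits
 * to RDMA_CORE_PORT_IBA_IB; for Ethernet, the RoCE v1/v2 bits are added
 * only when the device reports both IPv4 and IPv6 L3 support (the two
 * early returns below), and raw packet support is left out on multi-port
 * devices.
 */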
5087 static u32 get_core_cap_flags(struct ib_device *ibdev,
5088                               struct mlx5_hca_vport_context *rep)
5089 {
5090         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5091         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
5092         u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
5093         u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
5094         bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
5095         u32 ret = 0;
5096
5097         if (rep->grh_required)
5098                 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
5099
5100         if (ll == IB_LINK_LAYER_INFINIBAND)
5101                 return ret | RDMA_CORE_PORT_IBA_IB;
5102
5103         if (raw_support)
5104                 ret |= RDMA_CORE_PORT_RAW_PACKET;
5105
5106         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
5107                 return ret;
5108
5109         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
5110                 return ret;
5111
5112         if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
5113                 ret |= RDMA_CORE_PORT_IBA_ROCE;
5114
5115         if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
5116                 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
5117
5118         return ret;
5119 }
5120
5121 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
5122                                struct ib_port_immutable *immutable)
5123 {
5124         struct ib_port_attr attr;
5125         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5126         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
5127         struct mlx5_hca_vport_context rep = {0};
5128         int err;
5129
5130         err = ib_query_port(ibdev, port_num, &attr);
5131         if (err)
5132                 return err;
5133
5134         if (ll == IB_LINK_LAYER_INFINIBAND) {
5135                 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
5136                                                    &rep);
5137                 if (err)
5138                         return err;
5139         }
5140
5141         immutable->pkey_tbl_len = attr.pkey_tbl_len;
5142         immutable->gid_tbl_len = attr.gid_tbl_len;
5143         immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
5144         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
5145
5146         return 0;
5147 }
5148
5149 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
5150                                    struct ib_port_immutable *immutable)
5151 {
5152         struct ib_port_attr attr;
5153         int err;
5154
5155         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5156
5157         err = ib_query_port(ibdev, port_num, &attr);
5158         if (err)
5159                 return err;
5160
5161         immutable->pkey_tbl_len = attr.pkey_tbl_len;
5162         immutable->gid_tbl_len = attr.gid_tbl_len;
5163         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5164
5165         return 0;
5166 }
5167
5168 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
5169 {
5170         struct mlx5_ib_dev *dev =
5171                 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
5172         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
5173                  fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
5174                  fw_rev_sub(dev->mdev));
5175 }
5176
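/*
 * When RoCE LAG is active, create the vport LAG objects and the LAG demux
 * flow table; dev->lag_active records this so mlx5_eth_lag_cleanup() can
 * tear both down again.
 */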
5177 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
5178 {
5179         struct mlx5_core_dev *mdev = dev->mdev;
5180         struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
5181                                                                  MLX5_FLOW_NAMESPACE_LAG);
5182         struct mlx5_flow_table *ft;
5183         int err;
5184
5185         if (!ns || !mlx5_lag_is_roce(mdev))
5186                 return 0;
5187
5188         err = mlx5_cmd_create_vport_lag(mdev);
5189         if (err)
5190                 return err;
5191
5192         ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
5193         if (IS_ERR(ft)) {
5194                 err = PTR_ERR(ft);
5195                 goto err_destroy_vport_lag;
5196         }
5197
5198         dev->flow_db->lag_demux_ft = ft;
5199         dev->lag_active = true;
5200         return 0;
5201
5202 err_destroy_vport_lag:
5203         mlx5_cmd_destroy_vport_lag(mdev);
5204         return err;
5205 }
5206
5207 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
5208 {
5209         struct mlx5_core_dev *mdev = dev->mdev;
5210
5211         if (dev->lag_active) {
5212                 dev->lag_active = false;
5213
5214                 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5215                 dev->flow_db->lag_demux_ft = NULL;
5216
5217                 mlx5_cmd_destroy_vport_lag(mdev);
5218         }
5219 }
5220
5221 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5222 {
5223         int err;
5224
5225         dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
5226         err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
5227         if (err) {
5228                 dev->port[port_num].roce.nb.notifier_call = NULL;
5229                 return err;
5230         }
5231
5232         return 0;
5233 }
5234
5235 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5236 {
5237         if (dev->port[port_num].roce.nb.notifier_call) {
5238                 unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
5239                 dev->port[port_num].roce.nb.notifier_call = NULL;
5240         }
5241 }
5242
5243 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
5244 {
5245         int err;
5246
5247         err = mlx5_nic_vport_enable_roce(dev->mdev);
5248         if (err)
5249                 return err;
5250
5251         err = mlx5_eth_lag_init(dev);
5252         if (err)
5253                 goto err_disable_roce;
5254
5255         return 0;
5256
5257 err_disable_roce:
5258         mlx5_nic_vport_disable_roce(dev->mdev);
5259
5260         return err;
5261 }
5262
5263 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
5264 {
5265         mlx5_eth_lag_cleanup(dev);
5266         mlx5_nic_vport_disable_roce(dev->mdev);
5267 }
5268
5269 struct mlx5_ib_counter {
5270         const char *name;
5271         size_t offset;
5272 };
5273
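/*
 * Counter descriptors: each entry pairs a stat name with the byte offset
 * of its field inside the relevant query output layout, so a value can be
 * read straight out of the query buffer at that offset. The congestion
 * and PPCNT variants point at the high word of their 64-bit fields.
 */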
5274 #define INIT_Q_COUNTER(_name)           \
5275         { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
5276
5277 static const struct mlx5_ib_counter basic_q_cnts[] = {
5278         INIT_Q_COUNTER(rx_write_requests),
5279         INIT_Q_COUNTER(rx_read_requests),
5280         INIT_Q_COUNTER(rx_atomic_requests),
5281         INIT_Q_COUNTER(out_of_buffer),
5282 };
5283
5284 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5285         INIT_Q_COUNTER(out_of_sequence),
5286 };
5287
5288 static const struct mlx5_ib_counter retrans_q_cnts[] = {
5289         INIT_Q_COUNTER(duplicate_request),
5290         INIT_Q_COUNTER(rnr_nak_retry_err),
5291         INIT_Q_COUNTER(packet_seq_err),
5292         INIT_Q_COUNTER(implied_nak_seq_err),
5293         INIT_Q_COUNTER(local_ack_timeout_err),
5294 };
5295
5296 #define INIT_CONG_COUNTER(_name)                \
5297         { .name = #_name, .offset =     \
5298                 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
5299
5300 static const struct mlx5_ib_counter cong_cnts[] = {
5301         INIT_CONG_COUNTER(rp_cnp_ignored),
5302         INIT_CONG_COUNTER(rp_cnp_handled),
5303         INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5304         INIT_CONG_COUNTER(np_cnp_sent),
5305 };
5306
5307 static const struct mlx5_ib_counter extended_err_cnts[] = {
5308         INIT_Q_COUNTER(resp_local_length_error),
5309         INIT_Q_COUNTER(resp_cqe_error),
5310         INIT_Q_COUNTER(req_cqe_error),
5311         INIT_Q_COUNTER(req_remote_invalid_request),
5312         INIT_Q_COUNTER(req_remote_access_errors),
5313         INIT_Q_COUNTER(resp_remote_access_errors),
5314         INIT_Q_COUNTER(resp_cqe_flush_error),
5315         INIT_Q_COUNTER(req_cqe_flush_error),
5316 };
5317
5318 #define INIT_EXT_PPCNT_COUNTER(_name)           \
5319         { .name = #_name, .offset =     \
5320         MLX5_BYTE_OFF(ppcnt_reg, \
5321                       counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5322
5323 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5324         INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5325 };
5326
5327 static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
5328 {
5329         return MLX5_ESWITCH_MANAGER(mdev) &&
5330                mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
5331                        MLX5_ESWITCH_OFFLOADS;
5332 }
5333
5334 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
5335 {
5336         int num_cnt_ports;
5337         int i;
5338
5339         num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
5340
5341         for (i = 0; i < num_cnt_ports; i++) {
5342                 if (dev->port[i].cnts.set_id_valid)
5343                         mlx5_core_dealloc_q_counter(dev->mdev,
5344                                                     dev->port[i].cnts.set_id);
5345                 kfree(dev->port[i].cnts.names);
5346                 kfree(dev->port[i].cnts.offsets);
5347         }
5348 }
5349
5350 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5351                                     struct mlx5_ib_counters *cnts)
5352 {
5353         u32 num_counters;
5354
5355         num_counters = ARRAY_SIZE(basic_q_cnts);
5356
5357         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5358                 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5359
5360         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5361                 num_counters += ARRAY_SIZE(retrans_q_cnts);
5362
5363         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5364                 num_counters += ARRAY_SIZE(extended_err_cnts);
5365
5366         cnts->num_q_counters = num_counters;
5367
5368         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5369                 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5370                 num_counters += ARRAY_SIZE(cong_cnts);
5371         }
5372         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5373                 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5374                 num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5375         }
5376         cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
5377         if (!cnts->names)
5378                 return -ENOMEM;
5379
5380         cnts->offsets = kcalloc(num_counters,
5381                                 sizeof(cnts->offsets), GFP_KERNEL);
5382         if (!cnts->offsets)
5383                 goto err_names;
5384
5385         return 0;
5386
5387 err_names:
5388         kfree(cnts->names);
5389         cnts->names = NULL;
5390         return -ENOMEM;
5391 }
5392
5393 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5394                                   const char **names,
5395                                   size_t *offsets)
5396 {
5397         int i;
5398         int j = 0;
5399
5400         for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5401                 names[j] = basic_q_cnts[i].name;
5402                 offsets[j] = basic_q_cnts[i].offset;
5403         }
5404
5405         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5406                 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5407                         names[j] = out_of_seq_q_cnts[i].name;
5408                         offsets[j] = out_of_seq_q_cnts[i].offset;
5409                 }
5410         }
5411
5412         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5413                 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5414                         names[j] = retrans_q_cnts[i].name;
5415                         offsets[j] = retrans_q_cnts[i].offset;
5416                 }
5417         }
5418
5419         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5420                 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5421                         names[j] = extended_err_cnts[i].name;
5422                         offsets[j] = extended_err_cnts[i].offset;
5423                 }
5424         }
5425
5426         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5427                 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5428                         names[j] = cong_cnts[i].name;
5429                         offsets[j] = cong_cnts[i].offset;
5430                 }
5431         }
5432
5433         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5434                 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5435                         names[j] = ext_ppcnt_cnts[i].name;
5436                         offsets[j] = ext_ppcnt_cnts[i].offset;
5437                 }
5438         }
5439 }
5440
5441 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
5442 {
5443         int num_cnt_ports;
5444         int err = 0;
5445         int i;
5446         bool is_shared;
5447
5448         is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
5449         num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
5450
5451         for (i = 0; i < num_cnt_ports; i++) {
5452                 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5453                 if (err)
5454                         goto err_alloc;
5455
5456                 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5457                                       dev->port[i].cnts.offsets);
5458
5459                 err = mlx5_cmd_alloc_q_counter(dev->mdev,
5460                                                &dev->port[i].cnts.set_id,
5461                                                is_shared ?
5462                                                MLX5_SHARED_RESOURCE_UID : 0);
5463                 if (err) {
5464                         mlx5_ib_warn(dev,
5465                                      "couldn't allocate queue counter for port %d, err %d\n",
5466                                      i + 1, err);
5467                         goto err_alloc;
5468                 }
5469                 dev->port[i].cnts.set_id_valid = true;
5470         }
5471         return 0;
5472
5473 err_alloc:
5474         mlx5_ib_dealloc_counters(dev);
5475         return err;
5476 }
5477
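/*
 * In switchdev mode a single counter set (port 0) is shared by all ports;
 * otherwise each port carries its own set. port_num is zero based here.
 */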
5478 static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
5479                                                    u8 port_num)
5480 {
5481         return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
5482                                                    &dev->port[port_num].cnts;
5483 }
5484
5485 /**
5486  * mlx5_ib_get_counters_id - Returns counters id to use for device+port
5487  * @dev:        Pointer to mlx5 IB device
5488  * @port_num:   Zero based port number
5489  *
5490  * mlx5_ib_get_counters_id() returns the counter set id to use for the
5491  * given device/port combination, in both switchdev and non-switchdev
5492  * modes of the parent device.
5493  */
5494 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
5495 {
5496         const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
5497
5498         return cnts->set_id;
5499 }
5500
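/*
 * For hw_stats allocation, port_num == 0 selects the device-wide counter
 * set (switchdev mode only) and port_num >= 1 selects a specific port
 * (non-switchdev only); the check below rejects the mismatched case. The
 * allocated stats struct spans all three counter groups.
 */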
5501 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5502                                                     u8 port_num)
5503 {
5504         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5505         const struct mlx5_ib_counters *cnts;
5506         bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
5507
5508         if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
5509                 return NULL;
5510
5511         cnts = get_counters(dev, port_num - 1);
5512
5513         return rdma_alloc_hw_stats_struct(cnts->names,
5514                                           cnts->num_q_counters +
5515                                           cnts->num_cong_counters +
5516                                           cnts->num_ext_ppcnt_counters,
5517                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
5518 }
5519
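/*
 * Queue counters are returned by firmware as 32-bit big-endian values at
 * per-counter offsets within the query output; each one is widened to u64
 * for the rdma_hw_stats value array.
 */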
5520 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5521                                     const struct mlx5_ib_counters *cnts,
5522                                     struct rdma_hw_stats *stats,
5523                                     u16 set_id)
5524 {
5525         int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5526         void *out;
5527         __be32 val;
5528         int ret, i;
5529
5530         out = kvzalloc(outlen, GFP_KERNEL);
5531         if (!out)
5532                 return -ENOMEM;
5533
5534         ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
5535         if (ret)
5536                 goto free;
5537
5538         for (i = 0; i < cnts->num_q_counters; i++) {
5539                 val = *(__be32 *)(out + cnts->offsets[i]);
5540                 stats->value[i] = (u64)be32_to_cpu(val);
5541         }
5542
5543 free:
5544         kvfree(out);
5545         return ret;
5546 }
5547
5548 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5549                                             const struct mlx5_ib_counters *cnts,
5550                                             struct rdma_hw_stats *stats)
5551 {
5552         int offset = cnts->num_q_counters + cnts->num_cong_counters;
5553         int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5554         int ret, i;
5555         void *out;
5556
5557         out = kvzalloc(sz, GFP_KERNEL);
5558         if (!out)
5559                 return -ENOMEM;
5560
5561         ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5562         if (ret)
5563                 goto free;
5564
5565         for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
5566                 stats->value[i + offset] =
5567                         be64_to_cpup((__be64 *)(out +
5568                                     cnts->offsets[i + offset]));
5569 free:
5570         kvfree(out);
5571         return ret;
5572 }
5573
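/*
 * Layout of stats->value as filled in below:
 *   [0, num_q_counters)                   queue counters (master mdev)
 *   [num_q_counters, +num_cong_counters)  congestion counters (LAG)
 *   [.., +num_ext_ppcnt_counters)         extended PPCNT counters
 * mlx5_ib_query_ext_ppcnt_counters() relies on this layout when it writes
 * at offset num_q_counters + num_cong_counters.
 */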
5574 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5575                                 struct rdma_hw_stats *stats,
5576                                 u8 port_num, int index)
5577 {
5578         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5579         const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
5580         struct mlx5_core_dev *mdev;
5581         int ret, num_counters;
5582         u8 mdev_port_num;
5583
5584         if (!stats)
5585                 return -EINVAL;
5586
5587         num_counters = cnts->num_q_counters +
5588                        cnts->num_cong_counters +
5589                        cnts->num_ext_ppcnt_counters;
5590
5591         /* q_counters are per IB device, query the master mdev */
5592         ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
5593         if (ret)
5594                 return ret;
5595
5596         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5597                 ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
5598                 if (ret)
5599                         return ret;
5600         }
5601
5602         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5603                 mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5604                                                     &mdev_port_num);
5605                 if (!mdev) {
5606                         /* If port is not affiliated yet, its in down state
5607                          * which doesn't have any counters yet, so it would be
5608                          * zero. So no need to read from the HCA.
5609                          */
5610                         goto done;
5611                 }
5612                 ret = mlx5_lag_query_cong_counters(dev->mdev,
5613                                                    stats->value +
5614                                                    cnts->num_q_counters,
5615                                                    cnts->num_cong_counters,
5616                                                    cnts->offsets +
5617                                                    cnts->num_q_counters);
5618
5619                 mlx5_ib_put_native_port_mdev(dev, port_num);
5620                 if (ret)
5621                         return ret;
5622         }
5623
5624 done:
5625         return num_counters;
5626 }
5627
5628 static struct rdma_hw_stats *
5629 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
5630 {
5631         struct mlx5_ib_dev *dev = to_mdev(counter->device);
5632         const struct mlx5_ib_counters *cnts =
5633                 get_counters(dev, counter->port - 1);
5634
5635         /* Q counters are at the beginning of all counters */
5636         return rdma_alloc_hw_stats_struct(cnts->names,
5637                                           cnts->num_q_counters,
5638                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
5639 }
5640
5641 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
5642 {
5643         struct mlx5_ib_dev *dev = to_mdev(counter->device);
5644         const struct mlx5_ib_counters *cnts =
5645                 get_counters(dev, counter->port - 1);
5646
5647         return mlx5_ib_query_q_counters(dev->mdev, cnts,
5648                                         counter->stats, counter->id);
5649 }
5650
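/*
 * Bind a QP to an rdma_counter. A counter object lazily allocates its
 * firmware queue counter on first bind (counter->id == 0); if attaching
 * the QP then fails, the just-allocated counter is released and the id
 * reset so a later bind can retry the allocation.
 */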
5651 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
5652                                    struct ib_qp *qp)
5653 {
5654         struct mlx5_ib_dev *dev = to_mdev(qp->device);
5655         u16 cnt_set_id = 0;
5656         int err;
5657
5658         if (!counter->id) {
5659                 err = mlx5_cmd_alloc_q_counter(dev->mdev,
5660                                                &cnt_set_id,
5661                                                MLX5_SHARED_RESOURCE_UID);
5662                 if (err)
5663                         return err;
5664                 counter->id = cnt_set_id;
5665         }
5666
5667         err = mlx5_ib_qp_set_counter(qp, counter);
5668         if (err)
5669                 goto fail_set_counter;
5670
5671         return 0;
5672
5673 fail_set_counter:
5674         mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
5675         counter->id = 0;
5676
5677         return err;
5678 }
5679
5680 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
5681 {
5682         return mlx5_ib_qp_set_counter(qp, NULL);
5683 }
5684
5685 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
5686 {
5687         struct mlx5_ib_dev *dev = to_mdev(counter->device);
5688
5689         return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
5690 }
5691
5692 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5693                                  enum rdma_netdev_t type,
5694                                  struct rdma_netdev_alloc_params *params)
5695 {
5696         if (type != RDMA_NETDEV_IPOIB)
5697                 return -EOPNOTSUPP;
5698
5699         return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5700 }
5701
5702 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5703 {
5704         if (!dev->delay_drop.dir_debugfs)
5705                 return;
5706         debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
5707         dev->delay_drop.dir_debugfs = NULL;
5708 }
5709
5710 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5711 {
5712         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5713                 return;
5714
5715         cancel_work_sync(&dev->delay_drop.delay_drop_work);
5716         delay_drop_debugfs_cleanup(dev);
5717 }
5718
5719 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5720                                        size_t count, loff_t *pos)
5721 {
5722         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5723         char lbuf[20];
5724         int len;
5725
5726         len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5727         return simple_read_from_buffer(buf, count, pos, lbuf, len);
5728 }
5729
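/*
 * The timeout written by the user (in usec) is rounded up to a 100 usec
 * granularity and clamped to MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000, so
 * e.g. writing 250 stores 300. Illustrative usage, assuming the usual
 * debugfs layout (path not taken from this file):
 *   echo 250 > /sys/kernel/debug/mlx5/<pci-addr>/delay_drop/timeout
 */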
5730 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5731                                         size_t count, loff_t *pos)
5732 {
5733         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5734         u32 timeout;
5735         u32 var;
5736
5737         if (kstrtouint_from_user(buf, count, 0, &var))
5738                 return -EFAULT;
5739
5740         timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5741                         1000);
5742         if (timeout != var)
5743                 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5744                             timeout);
5745
5746         delay_drop->timeout = timeout;
5747
5748         return count;
5749 }
5750
5751 static const struct file_operations fops_delay_drop_timeout = {
5752         .owner  = THIS_MODULE,
5753         .open   = simple_open,
5754         .write  = delay_drop_timeout_write,
5755         .read   = delay_drop_timeout_read,
5756 };
5757
5758 static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5759 {
5760         struct dentry *root;
5761
5762         if (!mlx5_debugfs_root)
5763                 return;
5764
5765         root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
5766         dev->delay_drop.dir_debugfs = root;
5767
5768         debugfs_create_atomic_t("num_timeout_events", 0400, root,
5769                                 &dev->delay_drop.events_cnt);
5770         debugfs_create_atomic_t("num_rqs", 0400, root,
5771                                 &dev->delay_drop.rqs_cnt);
5772         debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
5773                             &fops_delay_drop_timeout);
5774 }
5775
5776 static void init_delay_drop(struct mlx5_ib_dev *dev)
5777 {
5778         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5779                 return;
5780
5781         mutex_init(&dev->delay_drop.lock);
5782         dev->delay_drop.dev = dev;
5783         dev->delay_drop.activate = false;
5784         dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5785         INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5786         atomic_set(&dev->delay_drop.rqs_cnt, 0);
5787         atomic_set(&dev->delay_drop.events_cnt, 0);
5788
5789         delay_drop_debugfs_init(dev);
5790 }
5791
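/*
 * Detach a slave port from its IB device. Clearing mpi->ibdev under
 * mpi_lock stops new users; in-flight users (mpi->mdev_refcnt) are then
 * drained by waiting for one unref_comp completion per reference before
 * the mpi is returned to the unaffiliated list.
 */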
5792 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5793                                       struct mlx5_ib_multiport_info *mpi)
5794 {
5795         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5796         struct mlx5_ib_port *port = &ibdev->port[port_num];
5797         int comps;
5798         int err;
5799         int i;
5800
5801         lockdep_assert_held(&mlx5_ib_multiport_mutex);
5802
5803         mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5804
5805         spin_lock(&port->mp.mpi_lock);
5806         if (!mpi->ibdev) {
5807                 spin_unlock(&port->mp.mpi_lock);
5808                 return;
5809         }
5810
5811         mpi->ibdev = NULL;
5812
5813         spin_unlock(&port->mp.mpi_lock);
5814         if (mpi->mdev_events.notifier_call)
5815                 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5816         mpi->mdev_events.notifier_call = NULL;
5817         mlx5_remove_netdev_notifier(ibdev, port_num);
5818         spin_lock(&port->mp.mpi_lock);
5819
5820         comps = mpi->mdev_refcnt;
5821         if (comps) {
5822                 mpi->unaffiliate = true;
5823                 init_completion(&mpi->unref_comp);
5824                 spin_unlock(&port->mp.mpi_lock);
5825
5826                 for (i = 0; i < comps; i++)
5827                         wait_for_completion(&mpi->unref_comp);
5828
5829                 spin_lock(&port->mp.mpi_lock);
5830                 mpi->unaffiliate = false;
5831         }
5832
5833         port->mp.mpi = NULL;
5834
5835         list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5836
5837         spin_unlock(&port->mp.mpi_lock);
5838
5839         err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5840
5841         mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5842         /* On failure just log an error; the pointers still need to be
5843          * cleaned up and the mpi added back to the unaffiliated list.
5844          */
5845         if (err)
5846                 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5847                             port_num + 1);
5848
5849         ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
5850 }
5851
5852 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5853                                     struct mlx5_ib_multiport_info *mpi)
5854 {
5855         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5856         int err;
5857
5858         lockdep_assert_held(&mlx5_ib_multiport_mutex);
5859
5860         spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5861         if (ibdev->port[port_num].mp.mpi) {
5862                 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5863                             port_num + 1);
5864                 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5865                 return false;
5866         }
5867
5868         ibdev->port[port_num].mp.mpi = mpi;
5869         mpi->ibdev = ibdev;
5870         mpi->mdev_events.notifier_call = NULL;
5871         spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5872
5873         err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5874         if (err)
5875                 goto unbind;
5876
5877         err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5878         if (err)
5879                 goto unbind;
5880
5881         err = mlx5_add_netdev_notifier(ibdev, port_num);
5882         if (err) {
5883                 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5884                             port_num + 1);
5885                 goto unbind;
5886         }
5887
5888         mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5889         mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5890
5891         mlx5_ib_init_cong_debugfs(ibdev, port_num);
5892
5893         return true;
5894
5895 unbind:
5896         mlx5_ib_unbind_slave_port(ibdev, mpi);
5897         return false;
5898 }
5899
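/*
 * On a multiport (LAG) master, build per-port mpi state: the native port
 * gets a locally allocated stub, while every other port is matched against
 * the unaffiliated list by system image GUID and bound when a slave with
 * the same GUID exists.
 */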
5900 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5901 {
5902         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5903         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5904                                                           port_num + 1);
5905         struct mlx5_ib_multiport_info *mpi;
5906         int err;
5907         int i;
5908
5909         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5910                 return 0;
5911
5912         err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5913                                                      &dev->sys_image_guid);
5914         if (err)
5915                 return err;
5916
5917         err = mlx5_nic_vport_enable_roce(dev->mdev);
5918         if (err)
5919                 return err;
5920
5921         mutex_lock(&mlx5_ib_multiport_mutex);
5922         for (i = 0; i < dev->num_ports; i++) {
5923                 bool bound = false;
5924
5925                 /* Build a stub multiport info struct for the native port. */
5926                 if (i == port_num) {
5927                         mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5928                         if (!mpi) {
5929                                 mutex_unlock(&mlx5_ib_multiport_mutex);
5930                                 mlx5_nic_vport_disable_roce(dev->mdev);
5931                                 return -ENOMEM;
5932                         }
5933
5934                         mpi->is_master = true;
5935                         mpi->mdev = dev->mdev;
5936                         mpi->sys_image_guid = dev->sys_image_guid;
5937                         dev->port[i].mp.mpi = mpi;
5938                         mpi->ibdev = dev;
5939                         mpi = NULL;
5940                         continue;
5941                 }
5942
5943                 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5944                                     list) {
5945                         if (dev->sys_image_guid == mpi->sys_image_guid &&
5946                             (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5947                                 bound = mlx5_ib_bind_slave_port(dev, mpi);
5948                         }
5949
5950                         if (bound) {
5951                                 dev_dbg(mpi->mdev->device,
5952                                         "removing port from unaffiliated list.\n");
5953                                 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5954                                 list_del(&mpi->list);
5955                                 break;
5956                         }
5957                 }
5958                 if (!bound) {
5959                         get_port_caps(dev, i + 1);
5960                         mlx5_ib_dbg(dev, "no free port found for port %d\n",
5961                                     i + 1);
5962                 }
5963         }
5964
5965         list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5966         mutex_unlock(&mlx5_ib_multiport_mutex);
5967         return err;
5968 }
5969
5970 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5971 {
5972         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5973         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5974                                                           port_num + 1);
5975         int i;
5976
5977         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5978                 return;
5979
5980         mutex_lock(&mlx5_ib_multiport_mutex);
5981         for (i = 0; i < dev->num_ports; i++) {
5982                 if (dev->port[i].mp.mpi) {
5983                         /* Destroy the native port stub */
5984                         if (i == port_num) {
5985                                 kfree(dev->port[i].mp.mpi);
5986                                 dev->port[i].mp.mpi = NULL;
5987                         } else {
5988                                 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5989                                 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5990                         }
5991                 }
5992         }
5993
5994         mlx5_ib_dbg(dev, "removing from devlist\n");
5995         list_del(&dev->ib_dev_list);
5996         mutex_unlock(&mlx5_ib_multiport_mutex);
5997
5998         mlx5_nic_vport_disable_roce(dev->mdev);
5999 }
6000
6001 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6002         mlx5_ib_dm,
6003         UVERBS_OBJECT_DM,
6004         UVERBS_METHOD_DM_ALLOC,
6005         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
6006                             UVERBS_ATTR_TYPE(u64),
6007                             UA_MANDATORY),
6008         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
6009                             UVERBS_ATTR_TYPE(u16),
6010                             UA_OPTIONAL),
6011         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
6012                              enum mlx5_ib_uapi_dm_type,
6013                              UA_OPTIONAL));
6014
6015 ADD_UVERBS_ATTRIBUTES_SIMPLE(
6016         mlx5_ib_flow_action,
6017         UVERBS_OBJECT_FLOW_ACTION,
6018         UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
6019         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
6020                              enum mlx5_ib_uapi_flow_action_flags));
6021
6022 static const struct uapi_definition mlx5_ib_defs[] = {
6023 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
6024         UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
6025         UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
6026 #endif
6027
6028         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
6029                                 &mlx5_ib_flow_action),
6030         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
6031         {}
6032 };
6033
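/*
 * Read flow counters into the user buffer. Each descriptor pairs a
 * hardware counter ("description", an index into the raw output) with a
 * user-visible slot ("index"); several hardware counters may accumulate
 * into the same slot, hence the += in the loop below.
 */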
6034 static int mlx5_ib_read_counters(struct ib_counters *counters,
6035                                  struct ib_counters_read_attr *read_attr,
6036                                  struct uverbs_attr_bundle *attrs)
6037 {
6038         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6039         struct mlx5_read_counters_attr mread_attr = {};
6040         struct mlx5_ib_flow_counters_desc *desc;
6041         int ret, i;
6042
6043         mutex_lock(&mcounters->mcntrs_mutex);
6044         if (mcounters->cntrs_max_index > read_attr->ncounters) {
6045                 ret = -EINVAL;
6046                 goto err_bound;
6047         }
6048
6049         mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
6050                                  GFP_KERNEL);
6051         if (!mread_attr.out) {
6052                 ret = -ENOMEM;
6053                 goto err_bound;
6054         }
6055
6056         mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
6057         mread_attr.flags = read_attr->flags;
6058         ret = mcounters->read_counters(counters->device, &mread_attr);
6059         if (ret)
6060                 goto err_read;
6061
6062         /* Walk the counters data array and accumulate each hardware value
6063          * into the user buffer slot named by its description/index pair.
6064          */
6065         desc = mcounters->counters_data;
6066         for (i = 0; i < mcounters->ncounters; i++)
6067                 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
6068
6069 err_read:
6070         kfree(mread_attr.out);
6071 err_bound:
6072         mutex_unlock(&mcounters->mcntrs_mutex);
6073         return ret;
6074 }
6075
6076 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
6077 {
6078         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6079
6080         counters_clear_description(counters);
6081         if (mcounters->hw_cntrs_hndl)
6082                 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
6083                                 mcounters->hw_cntrs_hndl);
6084
6085         kfree(mcounters);
6086
6087         return 0;
6088 }
6089
6090 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
6091                                                    struct uverbs_attr_bundle *attrs)
6092 {
6093         struct mlx5_ib_mcounters *mcounters;
6094
6095         mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
6096         if (!mcounters)
6097                 return ERR_PTR(-ENOMEM);
6098
6099         mutex_init(&mcounters->mcntrs_mutex);
6100
6101         return &mcounters->ibcntrs;
6102 }
6103
6104 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
6105 {
6106         mlx5_ib_cleanup_multiport_master(dev);
6107         WARN_ON(!xa_empty(&dev->odp_mkeys));
6108         cleanup_srcu_struct(&dev->odp_srcu);
6109
6110         WARN_ON(!xa_empty(&dev->sig_mrs));
6111         WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
6112 }
6113
6114 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6115 {
6116         struct mlx5_core_dev *mdev = dev->mdev;
6117         int err;
6118         int i;
6119
6120         for (i = 0; i < dev->num_ports; i++) {
6121                 spin_lock_init(&dev->port[i].mp.mpi_lock);
6122                 rwlock_init(&dev->port[i].roce.netdev_lock);
6123                 dev->port[i].roce.dev = dev;
6124                 dev->port[i].roce.native_port_num = i + 1;
6125                 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6126         }
6127
6128         mlx5_ib_internal_fill_odp_caps(dev);
6129
6130         err = mlx5_ib_init_multiport_master(dev);
6131         if (err)
6132                 return err;
6133
6134         err = set_has_smi_cap(dev);
6135         if (err)
6136                 return err;
6137
6138         if (!mlx5_core_mp_enabled(mdev)) {
6139                 for (i = 1; i <= dev->num_ports; i++) {
6140                         err = get_port_caps(dev, i);
6141                         if (err)
6142                                 break;
6143                 }
6144         } else {
6145                 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
6146         }
6147         if (err)
6148                 goto err_mp;
6149
6150         if (mlx5_use_mad_ifc(dev))
6151                 get_ext_port_caps(dev);
6152
6153         dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
6154         dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
6155         dev->ib_dev.phys_port_cnt       = dev->num_ports;
6156         dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
6157         dev->ib_dev.dev.parent          = mdev->device;
6158
6159         mutex_init(&dev->cap_mask_mutex);
6160         INIT_LIST_HEAD(&dev->qp_list);
6161         spin_lock_init(&dev->reset_flow_resource_lock);
6162         xa_init(&dev->odp_mkeys);
6163         xa_init(&dev->sig_mrs);
6164
6165         spin_lock_init(&dev->dm.lock);
6166         dev->dm.dev = mdev;
6167
6168         err = init_srcu_struct(&dev->odp_srcu);
6169         if (err)
6170                 goto err_mp;
6171
6172         return 0;
6173
6174 err_mp:
6175         mlx5_ib_cleanup_multiport_master(dev);
6176
6177         return err;
6178 }
6179
6180 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
6181 {
6182         dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
6183
6184         if (!dev->flow_db)
6185                 return -ENOMEM;
6186
6187         mutex_init(&dev->flow_db->lock);
6188
6189         return 0;
6190 }
6191
6192 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
6193 {
6194         kfree(dev->flow_db);
6195 }
6196
6197 static const struct ib_device_ops mlx5_ib_dev_ops = {
6198         .owner = THIS_MODULE,
6199         .driver_id = RDMA_DRIVER_MLX5,
6200         .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
6201
6202         .add_gid = mlx5_ib_add_gid,
6203         .alloc_mr = mlx5_ib_alloc_mr,
6204         .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
6205         .alloc_pd = mlx5_ib_alloc_pd,
6206         .alloc_ucontext = mlx5_ib_alloc_ucontext,
6207         .attach_mcast = mlx5_ib_mcg_attach,
6208         .check_mr_status = mlx5_ib_check_mr_status,
6209         .create_ah = mlx5_ib_create_ah,
6210         .create_counters = mlx5_ib_create_counters,
6211         .create_cq = mlx5_ib_create_cq,
6212         .create_flow = mlx5_ib_create_flow,
6213         .create_qp = mlx5_ib_create_qp,
6214         .create_srq = mlx5_ib_create_srq,
6215         .dealloc_pd = mlx5_ib_dealloc_pd,
6216         .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
6217         .del_gid = mlx5_ib_del_gid,
6218         .dereg_mr = mlx5_ib_dereg_mr,
6219         .destroy_ah = mlx5_ib_destroy_ah,
6220         .destroy_counters = mlx5_ib_destroy_counters,
6221         .destroy_cq = mlx5_ib_destroy_cq,
6222         .destroy_flow = mlx5_ib_destroy_flow,
6223         .destroy_flow_action = mlx5_ib_destroy_flow_action,
6224         .destroy_qp = mlx5_ib_destroy_qp,
6225         .destroy_srq = mlx5_ib_destroy_srq,
6226         .detach_mcast = mlx5_ib_mcg_detach,
6227         .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
6228         .drain_rq = mlx5_ib_drain_rq,
6229         .drain_sq = mlx5_ib_drain_sq,
6230         .enable_driver = mlx5_ib_enable_driver,
6231         .fill_res_entry = mlx5_ib_fill_res_entry,
6232         .fill_stat_entry = mlx5_ib_fill_stat_entry,
6233         .get_dev_fw_str = get_dev_fw_str,
6234         .get_dma_mr = mlx5_ib_get_dma_mr,
6235         .get_link_layer = mlx5_ib_port_link_layer,
6236         .map_mr_sg = mlx5_ib_map_mr_sg,
6237         .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
6238         .mmap = mlx5_ib_mmap,
6239         .modify_cq = mlx5_ib_modify_cq,
6240         .modify_device = mlx5_ib_modify_device,
6241         .modify_port = mlx5_ib_modify_port,
6242         .modify_qp = mlx5_ib_modify_qp,
6243         .modify_srq = mlx5_ib_modify_srq,
6244         .poll_cq = mlx5_ib_poll_cq,
6245         .post_recv = mlx5_ib_post_recv,
6246         .post_send = mlx5_ib_post_send,
6247         .post_srq_recv = mlx5_ib_post_srq_recv,
6248         .process_mad = mlx5_ib_process_mad,
6249         .query_ah = mlx5_ib_query_ah,
6250         .query_device = mlx5_ib_query_device,
6251         .query_gid = mlx5_ib_query_gid,
6252         .query_pkey = mlx5_ib_query_pkey,
6253         .query_qp = mlx5_ib_query_qp,
6254         .query_srq = mlx5_ib_query_srq,
6255         .read_counters = mlx5_ib_read_counters,
6256         .reg_user_mr = mlx5_ib_reg_user_mr,
6257         .req_notify_cq = mlx5_ib_arm_cq,
6258         .rereg_user_mr = mlx5_ib_rereg_user_mr,
6259         .resize_cq = mlx5_ib_resize_cq,
6260
6261         INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
6262         INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
6263         INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
6264         INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
6265         INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
6266 };
6267
6268 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
6269         .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
6270         .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
6271 };
6272
6273 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
6274         .rdma_netdev_get_params = mlx5_ib_rn_get_params,
6275 };
6276
6277 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
6278         .get_vf_config = mlx5_ib_get_vf_config,
6279         .get_vf_guid = mlx5_ib_get_vf_guid,
6280         .get_vf_stats = mlx5_ib_get_vf_stats,
6281         .set_vf_guid = mlx5_ib_set_vf_guid,
6282         .set_vf_link_state = mlx5_ib_set_vf_link_state,
6283 };
6284
6285 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6286         .alloc_mw = mlx5_ib_alloc_mw,
6287         .dealloc_mw = mlx5_ib_dealloc_mw,
6288 };
6289
6290 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6291         .alloc_xrcd = mlx5_ib_alloc_xrcd,
6292         .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6293 };
6294
6295 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6296         .alloc_dm = mlx5_ib_alloc_dm,
6297         .dealloc_dm = mlx5_ib_dealloc_dm,
6298         .reg_dm_mr = mlx5_ib_reg_dm_mr,
6299 };
6300
6301 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6302 {
6303         struct mlx5_core_dev *mdev = dev->mdev;
6304         int err;
6305
6306         dev->ib_dev.uverbs_cmd_mask     =
6307                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
6308                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
6309                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
6310                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
6311                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
6312                 (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
6313                 (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
6314                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
6315                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
6316                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
6317                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
6318                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
6319                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
6320                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
6321                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
6322                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
6323                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
6324                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
6325                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
6326                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
6327                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
6328                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
6329                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
6330                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
6331                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
6332                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
6333         dev->ib_dev.uverbs_ex_cmd_mask =
6334                 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
6335                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
6336                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)        |
6337                 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)        |
6338                 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)        |
6339                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)      |
6340                 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6341
6342         if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6343             IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6344                 ib_set_device_ops(&dev->ib_dev,
6345                                   &mlx5_ib_dev_ipoib_enhanced_ops);
6346
6347         if (mlx5_core_is_pf(mdev))
6348                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
6349
6350         dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6351
6352         if (MLX5_CAP_GEN(mdev, imaicl)) {
6353                 dev->ib_dev.uverbs_cmd_mask |=
6354                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
6355                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
6356                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6357         }
6358
6359         if (MLX5_CAP_GEN(mdev, xrc)) {
6360                 dev->ib_dev.uverbs_cmd_mask |=
6361                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6362                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
6363                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6364         }
6365
6366         if (MLX5_CAP_DEV_MEM(mdev, memic) ||
6367             MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6368             MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
6369                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
6370
6371         if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6372             MLX5_ACCEL_IPSEC_CAP_DEVICE)
6373                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
6374         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
6375
6376         if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6377                 dev->ib_dev.driver_def = mlx5_ib_defs;
6378
6379         err = init_node_data(dev);
6380         if (err)
6381                 return err;
6382
6383         if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6384             (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6385              MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
6386                 mutex_init(&dev->lb.mutex);
6387
6388         dev->ib_dev.use_cq_dim = true;
6389
6390         return 0;
6391 }
6392
6393 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6394         .get_port_immutable = mlx5_port_immutable,
6395         .query_port = mlx5_ib_query_port,
6396 };
6397
6398 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6399 {
6400         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6401         return 0;
6402 }
6403
6404 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6405         .get_port_immutable = mlx5_port_rep_immutable,
6406         .query_port = mlx5_ib_rep_query_port,
6407 };
6408
6409 static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
6410 {
6411         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6412         return 0;
6413 }
6414
6415 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6416         .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6417         .create_wq = mlx5_ib_create_wq,
6418         .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6419         .destroy_wq = mlx5_ib_destroy_wq,
6420         .get_netdev = mlx5_ib_get_netdev,
6421         .modify_wq = mlx5_ib_modify_wq,
6422 };
6423
6424 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
6425 {
6426         u8 port_num;
6427
6428         dev->ib_dev.uverbs_ex_cmd_mask |=
6429                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6430                         (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6431                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6432                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6433                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
6434         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
6435
6436         port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6437
6438         /* Register only for native ports */
6439         return mlx5_add_netdev_notifier(dev, port_num);
6440 }
6441
6442 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6443 {
6444         u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6445
6446         mlx5_remove_netdev_notifier(dev, port_num);
6447 }
6448
6449 static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
6450 {
6451         struct mlx5_core_dev *mdev = dev->mdev;
6452         enum rdma_link_layer ll;
6453         int port_type_cap;
6454         int err = 0;
6455
6456         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6457         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6458
6459         if (ll == IB_LINK_LAYER_ETHERNET)
6460                 err = mlx5_ib_stage_common_roce_init(dev);
6461
6462         return err;
6463 }
6464
6465 static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
6466 {
6467         mlx5_ib_stage_common_roce_cleanup(dev);
6468 }
6469
6470 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6471 {
6472         struct mlx5_core_dev *mdev = dev->mdev;
6473         enum rdma_link_layer ll;
6474         int port_type_cap;
6475         int err;
6476
6477         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6478         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6479
6480         if (ll == IB_LINK_LAYER_ETHERNET) {
6481                 err = mlx5_ib_stage_common_roce_init(dev);
6482                 if (err)
6483                         return err;
6484
6485                 err = mlx5_enable_eth(dev);
6486                 if (err)
6487                         goto cleanup;
6488         }
6489
6490         return 0;
6491 cleanup:
6492         mlx5_ib_stage_common_roce_cleanup(dev);
6493
6494         return err;
6495 }
6496
6497 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6498 {
6499         struct mlx5_core_dev *mdev = dev->mdev;
6500         enum rdma_link_layer ll;
6501         int port_type_cap;
6502
6503         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6504         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6505
6506         if (ll == IB_LINK_LAYER_ETHERNET) {
6507                 mlx5_disable_eth(dev);
6508                 mlx5_ib_stage_common_roce_cleanup(dev);
6509         }
6510 }
6511
6512 static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6513 {
6514         return create_dev_resources(&dev->devr);
6515 }
6516
6517 static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6518 {
6519         destroy_dev_resources(&dev->devr);
6520 }
6521
6522 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6523 {
6524         return mlx5_ib_odp_init_one(dev);
6525 }
6526
6527 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6528 {
6529         mlx5_ib_odp_cleanup_one(dev);
6530 }
6531
6532 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6533         .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6534         .get_hw_stats = mlx5_ib_get_hw_stats,
6535         .counter_bind_qp = mlx5_ib_counter_bind_qp,
6536         .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
6537         .counter_dealloc = mlx5_ib_counter_dealloc,
6538         .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
6539         .counter_update_stats = mlx5_ib_counter_update_stats,
6540 };
6541
6542 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
6543 {
6544         if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
6545                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
6546
6547                 return mlx5_ib_alloc_counters(dev);
6548         }
6549
6550         return 0;
6551 }
6552
6553 static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
6554 {
6555         if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6556                 mlx5_ib_dealloc_counters(dev);
6557 }
6558
6559 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6560 {
6561         mlx5_ib_init_cong_debugfs(dev,
6562                                   mlx5_core_native_port_num(dev->mdev) - 1);
6563         return 0;
6564 }
6565
6566 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6567 {
6568         mlx5_ib_cleanup_cong_debugfs(dev,
6569                                      mlx5_core_native_port_num(dev->mdev) - 1);
6570 }
6571
6572 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6573 {
6574         dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
6575         return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
6576 }
6577
6578 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6579 {
6580         mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6581 }
6582
6583 static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
6584 {
6585         int err;
6586
6587         err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6588         if (err)
6589                 return err;
6590
6591         err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6592         if (err)
6593                 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6594
6595         return err;
6596 }
6597
6598 static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
6599 {
6600         mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6601         mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6602 }
6603
6604 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6605 {
6606         const char *name;
6607
6608         rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
6609         if (!mlx5_lag_is_roce(dev->mdev))
6610                 name = "mlx5_%d";
6611         else
6612                 name = "mlx5_bond_%d";
6613         return ib_register_device(&dev->ib_dev, name);
6614 }
6615
6616 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6617 {
6618         destroy_umrc_res(dev);
6619 }
6620
6621 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
6622 {
6623         ib_unregister_device(&dev->ib_dev);
6624 }
6625
6626 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
6627 {
6628         return create_umr_res(dev);
6629 }
6630
6631 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6632 {
6633         init_delay_drop(dev);
6634
6635         return 0;
6636 }
6637
6638 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6639 {
6640         cancel_delay_drop(dev);
6641 }
6642
6643 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6644 {
6645         dev->mdev_events.notifier_call = mlx5_ib_event;
6646         mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6647         return 0;
6648 }
6649
6650 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6651 {
6652         mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6653 }
6654
6655 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6656 {
6657         int uid;
6658
6659         uid = mlx5_ib_devx_create(dev, false);
6660         if (uid > 0) {
6661                 dev->devx_whitelist_uid = uid;
6662                 mlx5_ib_devx_init_event_table(dev);
6663         }
6664
6665         return 0;
6666 }
6667 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6668 {
6669         if (dev->devx_whitelist_uid) {
6670                 mlx5_ib_devx_cleanup_event_table(dev);
6671                 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6672         }
6673 }
6674
6675 int mlx5_ib_enable_driver(struct ib_device *dev)
6676 {
6677         struct mlx5_ib_dev *mdev = to_mdev(dev);
6678         int ret;
6679
6680         ret = mlx5_ib_test_wc(mdev);
6681         mlx5_ib_dbg(mdev, "Write-Combining %s",
6682                     mdev->wc_support ? "supported" : "not supported");
6683
6684         return ret;
6685 }
6686
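/*
 * Stage machinery: __mlx5_ib_add() runs profile->stage[i].init in order
 * and, on a failure at stage i, __mlx5_ib_remove() unwinds stages i-1..0
 * by calling each non-NULL cleanup in reverse. Passing MLX5_IB_STAGE_MAX
 * tears down a fully initialized device.
 */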
6687 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6688                       const struct mlx5_ib_profile *profile,
6689                       int stage)
6690 {
6691         /* 'stage' is the number of stages to clean up, unwound in reverse */
6692         while (stage) {
6693                 stage--;
6694                 if (profile->stage[stage].cleanup)
6695                         profile->stage[stage].cleanup(dev);
6696         }
6697
6698         kfree(dev->port);
6699         ib_dealloc_device(&dev->ib_dev);
6700 }
6701
6702 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6703                     const struct mlx5_ib_profile *profile)
6704 {
6705         int err;
6706         int i;
6707
6708         for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6709                 if (profile->stage[i].init) {
6710                         err = profile->stage[i].init(dev);
6711                         if (err)
6712                                 goto err_out;
6713                 }
6714         }
6715
6716         dev->profile = profile;
6717         dev->ib_active = true;
6718
6719         return dev;
6720
6721 err_out:
6722         __mlx5_ib_remove(dev, profile, i);
6723
6724         return NULL;
6725 }
6726
6727 static const struct mlx5_ib_profile pf_profile = {
6728         STAGE_CREATE(MLX5_IB_STAGE_INIT,
6729                      mlx5_ib_stage_init_init,
6730                      mlx5_ib_stage_init_cleanup),
6731         STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6732                      mlx5_ib_stage_flow_db_init,
6733                      mlx5_ib_stage_flow_db_cleanup),
6734         STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6735                      mlx5_ib_stage_caps_init,
6736                      NULL),
6737         STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6738                      mlx5_ib_stage_non_default_cb,
6739                      NULL),
6740         STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6741                      mlx5_ib_stage_roce_init,
6742                      mlx5_ib_stage_roce_cleanup),
6743         STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6744                      mlx5_init_srq_table,
6745                      mlx5_cleanup_srq_table),
6746         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6747                      mlx5_ib_stage_dev_res_init,
6748                      mlx5_ib_stage_dev_res_cleanup),
6749         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6750                      mlx5_ib_stage_dev_notifier_init,
6751                      mlx5_ib_stage_dev_notifier_cleanup),
6752         STAGE_CREATE(MLX5_IB_STAGE_ODP,
6753                      mlx5_ib_stage_odp_init,
6754                      mlx5_ib_stage_odp_cleanup),
6755         STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6756                      mlx5_ib_stage_counters_init,
6757                      mlx5_ib_stage_counters_cleanup),
6758         STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6759                      mlx5_ib_stage_cong_debugfs_init,
6760                      mlx5_ib_stage_cong_debugfs_cleanup),
6761         STAGE_CREATE(MLX5_IB_STAGE_UAR,
6762                      mlx5_ib_stage_uar_init,
6763                      mlx5_ib_stage_uar_cleanup),
6764         STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6765                      mlx5_ib_stage_bfrag_init,
6766                      mlx5_ib_stage_bfrag_cleanup),
6767         STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6768                      NULL,
6769                      mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6770         STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6771                      mlx5_ib_stage_devx_init,
6772                      mlx5_ib_stage_devx_cleanup),
6773         STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6774                      mlx5_ib_stage_ib_reg_init,
6775                      mlx5_ib_stage_ib_reg_cleanup),
6776         STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6777                      mlx5_ib_stage_post_ib_reg_umr_init,
6778                      NULL),
6779         STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6780                      mlx5_ib_stage_delay_drop_init,
6781                      mlx5_ib_stage_delay_drop_cleanup),
6782 };
6783
6784 const struct mlx5_ib_profile raw_eth_profile = {
6785         STAGE_CREATE(MLX5_IB_STAGE_INIT,
6786                      mlx5_ib_stage_init_init,
6787                      mlx5_ib_stage_init_cleanup),
6788         STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6789                      mlx5_ib_stage_flow_db_init,
6790                      mlx5_ib_stage_flow_db_cleanup),
6791         STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6792                      mlx5_ib_stage_caps_init,
6793                      NULL),
6794         STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6795                      mlx5_ib_stage_raw_eth_non_default_cb,
6796                      NULL),
6797         STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6798                      mlx5_ib_stage_raw_eth_roce_init,
6799                      mlx5_ib_stage_raw_eth_roce_cleanup),
6800         STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6801                      mlx5_init_srq_table,
6802                      mlx5_cleanup_srq_table),
6803         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6804                      mlx5_ib_stage_dev_res_init,
6805                      mlx5_ib_stage_dev_res_cleanup),
6806         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6807                      mlx5_ib_stage_dev_notifier_init,
6808                      mlx5_ib_stage_dev_notifier_cleanup),
6809         STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6810                      mlx5_ib_stage_counters_init,
6811                      mlx5_ib_stage_counters_cleanup),
6812         STAGE_CREATE(MLX5_IB_STAGE_UAR,
6813                      mlx5_ib_stage_uar_init,
6814                      mlx5_ib_stage_uar_cleanup),
6815         STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6816                      mlx5_ib_stage_bfrag_init,
6817                      mlx5_ib_stage_bfrag_cleanup),
6818         STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6819                      NULL,
6820                      mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6821         STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6822                      mlx5_ib_stage_devx_init,
6823                      mlx5_ib_stage_devx_cleanup),
6824         STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6825                      mlx5_ib_stage_ib_reg_init,
6826                      mlx5_ib_stage_ib_reg_cleanup),
6827         STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6828                      mlx5_ib_stage_post_ib_reg_umr_init,
6829                      NULL),
6830 };
6831
6832 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
6833 {
6834         struct mlx5_ib_multiport_info *mpi;
6835         struct mlx5_ib_dev *dev;
6836         bool bound = false;
6837         int err;
6838
6839         mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6840         if (!mpi)
6841                 return NULL;
6842
6843         mpi->mdev = mdev;
6844
6845         err = mlx5_query_nic_vport_system_image_guid(mdev,
6846                                                      &mpi->sys_image_guid);
6847         if (err) {
6848                 kfree(mpi);
6849                 return NULL;
6850         }
6851
6852         mutex_lock(&mlx5_ib_multiport_mutex);
6853         list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6854                 if (dev->sys_image_guid == mpi->sys_image_guid)
6855                         bound = mlx5_ib_bind_slave_port(dev, mpi);
6856
6857                 if (bound) {
6858                         rdma_roce_rescan_device(&dev->ib_dev);
6859                         break;
6860                 }
6861         }
6862
6863         if (!bound) {
6864                 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6865                 dev_dbg(mdev->device,
6866                         "no suitable IB device found to bind to, added to unaffiliated list.\n");
6867         }
6868         mutex_unlock(&mlx5_ib_multiport_mutex);
6869
6870         return mpi;
6871 }
6872
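/*
 * Entry point for a new mlx5 core device. Eswitch-offloads managers only
 * register vport representors; Ethernet multiport slaves are affiliated
 * to an existing master via mlx5_ib_add_slave_port(); anything else
 * becomes a full IB device using the raw-Ethernet or PF profile.
 */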
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	const struct mlx5_ib_profile *profile;
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;
	int port_type_cap;
	int num_ports;

	printk_once(KERN_INFO "%s", mlx5_version);

	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
		if (!mlx5_core_mp_enabled(mdev))
			mlx5_ib_register_vport_reps(mdev);
		return mdev;
	}

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
		return mlx5_ib_add_slave_port(mdev);

	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			MLX5_CAP_GEN(mdev, num_vhca_ports));
	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!dev)
		return NULL;
	dev->port = kcalloc(num_ports, sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port) {
		ib_dealloc_device(&dev->ib_dev);
		return NULL;
	}

	dev->mdev = mdev;
	dev->num_ports = num_ports;

	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
		profile = &raw_eth_profile;
	else
		profile = &pf_profile;

	return __mlx5_ib_add(dev, profile);
}

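/*
 * remove() callback: mirrors mlx5_ib_add(). A context equal to the mdev
 * means only vport representors were registered; an MP slave is unbound
 * from its master (if any) and its mpi freed; otherwise the full stage
 * pipeline is unwound via __mlx5_ib_remove().
 */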
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;

	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
		mlx5_ib_unregister_vport_reps(mdev);
		return;
	}

	if (mlx5_core_is_mp_slave(mdev)) {
		mpi = context;
		mutex_lock(&mlx5_ib_multiport_mutex);
		if (mpi->ibdev)
			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
		list_del(&mpi->list);
		mutex_unlock(&mlx5_ib_multiport_mutex);
		kfree(mpi);
		return;
	}

	dev = context;
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

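/*
 * Glue registered with mlx5_core: add()/remove() above run whenever an
 * mlx5 core device that supports the IB protocol comes or goes.
 */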
static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};

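/*
 * The XLT emergency page is a single pre-allocated page used as a
 * last-resort bounce buffer when the MR code cannot allocate memory for
 * an XLT update; the mutex makes the get/put pair behave as a lock
 * around it. A caller pairs the two, e.g. (illustrative only):
 *
 *	unsigned long page = mlx5_ib_get_xlt_emergency_page();
 *	// ... use the page for the update ...
 *	mlx5_ib_put_xlt_emergency_page();
 */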
unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}

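/*
 * Module init: set up the emergency page and its mutex, create the
 * ordered event workqueue, initialize ODP, then register with mlx5_core.
 * Everything acquired before a failing step is released again.
 */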
static int __init mlx5_ib_init(void)
{
	int err;

	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mutex_init(&xlt_emergency_page_mutex);

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		err = -ENOMEM;
		goto err_free_page;
	}

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto err_destroy_wq;

	return 0;

err_destroy_wq:
	/* Don't leak the workqueue or the emergency page on failure. */
	destroy_workqueue(mlx5_ib_event_wq);
err_free_page:
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
	return err;
}

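/*
 * Module exit: tear down in the reverse order of mlx5_ib_init().
 */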
static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);