/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "qp.h"
#include "wr.h"

enum {
        MLX5_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum raw_qp_set_mask_map {
        MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID         = 1UL << 0,
        MLX5_RAW_QP_RATE_LIMIT                  = 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
        u16 operation;

        u32 set_mask; /* raw_qp_set_mask_map */

        struct mlx5_rate_limit rl;

        u8 rq_q_ctr_id;
        u16 port;
};

static void get_cqs(enum ib_qp_type qp_type,
                    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}

/**
 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of it) from a user WQ
 * to a kernel buffer
 *
 * @umem: User space memory where the WQ is
 * @buffer: buffer to copy to
 * @buflen: buffer length
 * @wqe_index: index of WQE to copy from
 * @wq_offset: offset to start of WQ
 * @wq_wqe_cnt: number of WQEs in WQ
 * @wq_wqe_shift: log2 of WQE size
 * @bcnt: number of bytes to copy
 * @bytes_copied: number of bytes actually copied (output)
 *
 * Copies bcnt bytes or less from the start of the WQE.
 * Does not guarantee to copy the entire WQE.
 *
 * Return: zero on success, or an error code.
 */
static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
                                        size_t buflen, int wqe_index,
                                        int wq_offset, int wq_wqe_cnt,
                                        int wq_wqe_shift, int bcnt,
                                        size_t *bytes_copied)
{
        size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
        size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
        size_t copy_length;
        int ret;

        /* don't copy more than requested, more than buffer length or
         * beyond WQ end
         */
        copy_length = min_t(u32, buflen, wq_end - offset);
        copy_length = min_t(u32, copy_length, bcnt);

        ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
        if (ret)
                return ret;

        if (bytes_copied)
                *bytes_copied = copy_length;

        return 0;
}
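
/*
 * Worked example for the helper above (illustrative values, not hardware
 * limits): with wq_wqe_cnt = 8 and wq_wqe_shift = 6 (64-byte WQEs),
 * wqe_index 10 wraps to slot 10 % 8 = 2, i.e. offset = wq_offset + 128.
 * A request for bcnt = 256 bytes at slot 7 is clipped to the 64 bytes
 * left before wq_end; the SQ reader below restarts at index 0 to pick up
 * the rest.
 */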

static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
                                      void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_wqe_ctrl_seg *ctrl;
        size_t bytes_copied = 0;
        size_t wqe_length;
        void *p;
        int ds;

        wqe_index = wqe_index & qp->sq.fbc.sz_m1;

        /* read the control segment first */
        p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
        ctrl = p;
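        /*
         * qpn_ds encodes the WQE size in 16-byte data segments (DS);
         * e.g. ds = 4 describes a 64-byte WQE, exactly one basic block.
         */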
        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        wqe_length = ds * MLX5_WQE_DS_UNITS;

        /* read the rest of the WQE if it spans more than one stride */
        while (bytes_copied < wqe_length) {
                size_t copy_length =
                        min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);

                if (!copy_length)
                        break;

                memcpy(buffer + bytes_copied, p, copy_length);
                bytes_copied += copy_length;

                wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
                p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
        }
        *bc = bytes_copied;
        return 0;
}

static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
                                    void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->sq;
        struct mlx5_wqe_ctrl_seg *ctrl;
        size_t bytes_copied;
        size_t bytes_copied2;
        size_t wqe_length;
        int ret;
        int ds;

        /* first, read as much as possible */
        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
                                           wq->offset, wq->wqe_cnt,
                                           wq->wqe_shift, buflen,
                                           &bytes_copied);
        if (ret)
                return ret;

        /* we need at least the control segment size to proceed */
        if (bytes_copied < sizeof(*ctrl))
                return -EINVAL;

        ctrl = buffer;
        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        wqe_length = ds * MLX5_WQE_DS_UNITS;

        /* if we copied enough then we are done */
        if (bytes_copied >= wqe_length) {
                *bc = bytes_copied;
                return 0;
        }

        /* otherwise the WQE wrapped around the queue, so read the
         * remaining bytes starting from wqe_index 0
         */
        ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
                                           buflen - bytes_copied, 0, wq->offset,
                                           wq->wqe_cnt, wq->wqe_shift,
                                           wqe_length - bytes_copied,
                                           &bytes_copied2);

        if (ret)
                return ret;
        *bc = bytes_copied + bytes_copied2;
        return 0;
}

int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;

        if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
                return -EINVAL;

        if (!umem)
                return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
                                                  buflen, bc);

        return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
}
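
/*
 * Note: the mlx5_ib_read_wqe_* helpers are used by the on-demand paging
 * (ODP) page-fault path, which needs to parse the faulting WQE out of a
 * (possibly user-mapped) work queue.
 */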

static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
                                    void *buffer, size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->rq;
        size_t bytes_copied;
        int ret;

        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
                                           wq->offset, wq->wqe_cnt,
                                           wq->wqe_shift, buflen,
                                           &bytes_copied);

        if (ret)
                return ret;
        *bc = bytes_copied;
        return 0;
}

int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
                        size_t buflen, size_t *bc)
{
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct ib_umem *umem = base->ubuffer.umem;
        struct mlx5_ib_wq *wq = &qp->rq;
        size_t wqe_size = 1 << wq->wqe_shift;

        if (buflen < wqe_size)
                return -EINVAL;

        if (!umem)
                return -EOPNOTSUPP;

        return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
}

static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
                                     void *buffer, size_t buflen, size_t *bc)
{
        struct ib_umem *umem = srq->umem;
        size_t bytes_copied;
        int ret;

        ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
                                           srq->msrq.max, srq->msrq.wqe_shift,
                                           buflen, &bytes_copied);

        if (ret)
                return ret;
        *bc = bytes_copied;
        return 0;
}

int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
                         size_t buflen, size_t *bc)
{
        struct ib_umem *umem = srq->umem;
        size_t wqe_size = 1 << srq->msrq.wqe_shift;

        if (buflen < wqe_size)
                return -EINVAL;

        if (!umem)
                return -EOPNOTSUPP;

        return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
        struct ib_event event;

        if (type == MLX5_EVENT_TYPE_PATH_MIG) {
                /* This event is only valid for trans_qps */
                to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
        }

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX5_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX5_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

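/*
 * RQ sizing, roughly: each receive WQE is max_recv_sge scatter entries of
 * 16 bytes (struct mlx5_wqe_data_seg), plus an optional signature segment,
 * rounded up to a power of two.  E.g. max_recv_wr = 100, max_recv_sge = 3,
 * no signature: wqe_size = 48 -> 64, wq_size = 128 * 64, so wqe_cnt = 128
 * and max_gs = 64 / 16 = 4.  (Illustrative numbers, not hardware limits.)
 */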
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
        int wqe_size;
        int wq_size;

        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;

        if (!has_rq) {
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
        } else {
                int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);

                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
                        if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
                                return -EINVAL;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
                        if ((1 << qp->rq.wqe_shift) /
                                    sizeof(struct mlx5_wqe_data_seg) <
                            wq_sig)
                                return -EINVAL;
                        qp->rq.max_gs =
                                (1 << qp->rq.wqe_shift) /
                                        sizeof(struct mlx5_wqe_data_seg) -
                                wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
                        wqe_size =
                                wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
                                         0;
                        wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
                        wqe_size = roundup_pow_of_two(wqe_size);
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
                        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
                                            MLX5_CAP_GEN(dev->mdev,
                                                         max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
                        qp->rq.max_gs =
                                (1 << qp->rq.wqe_shift) /
                                        sizeof(struct mlx5_wqe_data_seg) -
                                wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                }
        }

        return 0;
}

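/*
 * sq_overhead() returns, for the given QP type, the fixed per-WQE cost in
 * bytes of the segments that precede any data/inline segments (control,
 * remote address, atomic, datagram, UMR, ...).  XRC initiator adds its XRC
 * segment on top of the RC overhead, hence the fall-through below.
 */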
static int sq_overhead(struct ib_qp_init_attr *attr)
{
        int size = 0;

        switch (attr->qp_type) {
        case IB_QPT_XRC_INI:
                size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        max(sizeof(struct mlx5_wqe_atomic_seg) +
                            sizeof(struct mlx5_wqe_raddr_seg),
                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                            sizeof(struct mlx5_mkey_seg) +
                            MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
                            MLX5_IB_UMR_OCTOWORD);
                break;

        case IB_QPT_XRC_TGT:
                return 0;

        case IB_QPT_UC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        max(sizeof(struct mlx5_wqe_raddr_seg),
                            sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                            sizeof(struct mlx5_mkey_seg));
                break;

        case IB_QPT_UD:
                if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
                        size += sizeof(struct mlx5_wqe_eth_pad) +
                                sizeof(struct mlx5_wqe_eth_seg);
                /* fall through */
        case IB_QPT_SMI:
        case MLX5_IB_QPT_HW_GSI:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;

        case MLX5_IB_QPT_REG_UMR:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        default:
                return -EINVAL;
        }

        return size;
}

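/*
 * The send WQE size below is aligned up to MLX5_SEND_WQE_BB (the 64-byte
 * basic block), so a WQE always occupies a whole number of basic blocks;
 * signature-enabled QPs are padded up to MLX5_SIG_WQE_SIZE.
 */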
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
        int inl_size = 0;
        int size;

        size = sq_overhead(attr);
        if (size < 0)
                return size;

        if (attr->cap.max_inline_data) {
                inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
                        attr->cap.max_inline_data;
        }

        size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
        if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
            ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
                return MLX5_SIG_WQE_SIZE;
        else
                return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
        int max_sge;

        if (attr->qp_type == IB_QPT_RC)
                max_sge = (min_t(int, wqe_size, 512) -
                           sizeof(struct mlx5_wqe_ctrl_seg) -
                           sizeof(struct mlx5_wqe_raddr_seg)) /
                        sizeof(struct mlx5_wqe_data_seg);
        else if (attr->qp_type == IB_QPT_XRC_INI)
                max_sge = (min_t(int, wqe_size, 512) -
                           sizeof(struct mlx5_wqe_ctrl_seg) -
                           sizeof(struct mlx5_wqe_xrc_seg) -
                           sizeof(struct mlx5_wqe_raddr_seg)) /
                        sizeof(struct mlx5_wqe_data_seg);
        else
                max_sge = (wqe_size - sq_overhead(attr)) /
                        sizeof(struct mlx5_wqe_data_seg);

        return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
                     sizeof(struct mlx5_wqe_data_seg));
}

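/*
 * SQ sizing sketch (illustrative numbers): with max_send_wr = 100 and
 * wqe_size = 192, wq_size = roundup_pow_of_two(100 * 192) = 32768 bytes,
 * i.e. wqe_cnt = 512 basic blocks and max_post = 32768 / 192 = 170 WQEs.
 */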
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
        int wqe_size;
        int wq_size;

        if (!attr->cap.max_send_wr)
                return 0;

        wqe_size = calc_send_wqe(attr);
        mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
        if (wqe_size < 0)
                return wqe_size;

        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
                            wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        qp->max_inline_data = wqe_size - sq_overhead(attr) -
                              sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;

        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
                            attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
                            qp->sq.wqe_cnt,
                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = get_send_sge(attr, wqe_size);
        if (qp->sq.max_gs < attr->cap.max_send_sge)
                return -ENOMEM;

        attr->cap.max_send_sge = qp->sq.max_gs;
        qp->sq.max_post = wq_size / wqe_size;
        attr->cap.max_send_wr = qp->sq.max_post;

        return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_ib_create_qp *ucmd,
                            struct mlx5_ib_qp_base *base,
                            struct ib_qp_init_attr *attr)
{
        int desc_sz = 1 << qp->sq.wqe_shift;

        if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
                             desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
                mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
                             ucmd->sq_wqe_count);
                return -EINVAL;
        }

        qp->sq.wqe_cnt = ucmd->sq_wqe_count;

        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
                             qp->sq.wqe_cnt,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }

        if (attr->qp_type == IB_QPT_RAW_PACKET ||
            qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
                qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
        } else {
                base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                                         (qp->sq.wqe_cnt << 6);
        }

        return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            attr->qp_type == MLX5_IB_QPT_REG_UMR ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

enum {
        /* this is the first blue flame register in the array of bfregs
         * assigned to a process. Since we do not use it for blue flame but
         * rather regular 64 bit doorbells, we do not need a lock for
         * maintaining "odd/even" order
         */
        NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
        return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
                         struct mlx5_bfreg_info *bfregi)
{
        int n;

        n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
            NUM_NON_BLUE_FLAME_BFREGS;

        return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
                           struct mlx5_bfreg_info *bfregi)
{
        return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}

static int first_hi_bfreg(struct mlx5_ib_dev *dev,
                          struct mlx5_bfreg_info *bfregi)
{
        int med;

        med = num_med_bfreg(dev, bfregi);
        return ++med;
}

static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
                                  struct mlx5_bfreg_info *bfregi)
{
        int i;

        for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
                if (!bfregi->count[i]) {
                        bfregi->count[i]++;
                        return i;
                }
        }

        return -ENOMEM;
}

static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
                                 struct mlx5_bfreg_info *bfregi)
{
        int minidx = first_med_bfreg(dev, bfregi);
        int i;

        if (minidx < 0)
                return minidx;

        for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
                if (bfregi->count[i] < bfregi->count[minidx])
                        minidx = i;
                if (!bfregi->count[minidx])
                        break;
        }

        bfregi->count[minidx]++;
        return minidx;
}

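/*
 * bfreg allocation order: try the high class first (an unused bfreg per
 * QP), then fall back to the medium class (shared, pick the least used),
 * and finally to bfreg 0, which is never used for blue flame and is
 * always shareable as a plain doorbell register.
 */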
static int alloc_bfreg(struct mlx5_ib_dev *dev,
                       struct mlx5_bfreg_info *bfregi)
{
        int bfregn = -ENOMEM;

        if (bfregi->lib_uar_dyn)
                return -EINVAL;

        mutex_lock(&bfregi->lock);
        if (bfregi->ver >= 2) {
                bfregn = alloc_high_class_bfreg(dev, bfregi);
                if (bfregn < 0)
                        bfregn = alloc_med_class_bfreg(dev, bfregi);
        }

        if (bfregn < 0) {
                BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
                bfregn = 0;
                bfregi->count[bfregn]++;
        }
        mutex_unlock(&bfregi->lock);

        return bfregn;
}

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
        mutex_lock(&bfregi->lock);
        bfregi->count[bfregn]--;
        mutex_unlock(&bfregi->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX5_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX5_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX5_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX5_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX5_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX5_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX5_QP_STATE_ERR;
        default:                return -1;
        }
}

static int to_mlx5_st(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_RC:                 return MLX5_QP_ST_RC;
        case IB_QPT_UC:                 return MLX5_QP_ST_UC;
        case IB_QPT_UD:                 return MLX5_QP_ST_UD;
        case MLX5_IB_QPT_REG_UMR:       return MLX5_QP_ST_REG_UMR;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
        case MLX5_IB_QPT_HW_GSI:        return MLX5_QP_ST_QP1;
        case MLX5_IB_QPT_DCI:           return MLX5_QP_ST_DCI;
        case IB_QPT_RAW_PACKET:         return MLX5_QP_ST_RAW_ETHERTYPE;
        default:                return -EINVAL;
        }
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
                             struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
                               struct mlx5_ib_cq *recv_cq);

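/*
 * Index math sketch for the function below (assuming
 * MLX5_NON_FP_BFREGS_PER_UAR == 2): with, say, 4 UARs per system page
 * there are 8 driver-visible bfregs per page, so bfregn 11 lives on sys
 * page 1 at UAR offset (11 % 8) / 2 = 1, giving
 * uar_index = sys_pages[1] + 1.
 */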
int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
                        struct mlx5_bfreg_info *bfregi, u32 bfregn,
                        bool dyn_bfreg)
{
        unsigned int bfregs_per_sys_page;
        u32 index_of_sys_page;
        u32 offset;

        if (bfregi->lib_uar_dyn)
                return -EINVAL;

        bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
                                MLX5_NON_FP_BFREGS_PER_UAR;
        index_of_sys_page = bfregn / bfregs_per_sys_page;

        if (dyn_bfreg) {
                index_of_sys_page += bfregi->num_static_sys_pages;

                if (index_of_sys_page >= bfregi->num_sys_pages)
                        return -EINVAL;

                if (bfregn > bfregi->num_dyn_bfregs ||
                    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
                        mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
                        return -EINVAL;
                }
        }

        offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
        return bfregi->sys_pages[index_of_sys_page] + offset;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                            unsigned long addr, size_t size,
                            struct ib_umem **umem, int *npages, int *page_shift,
                            int *ncont, u32 *offset)
{
        int err;

        *umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
        if (IS_ERR(*umem)) {
                mlx5_ib_dbg(dev, "umem_get failed\n");
                return PTR_ERR(*umem);
        }

        mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

        err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
                    addr, size, *npages, *page_shift, *ncont, *offset);

        return 0;

err_umem:
        ib_umem_release(*umem);
        *umem = NULL;

        return err;
}

static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
        struct mlx5_ib_ucontext *context =
                rdma_udata_to_drv_context(
                        udata,
                        struct mlx5_ib_ucontext,
                        ibucontext);

        if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
                atomic_dec(&dev->delay_drop.rqs_cnt);

        mlx5_ib_db_unmap_user(context, &rwq->db);
        ib_umem_release(rwq->umem);
}

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
                          struct mlx5_ib_create_wq *ucmd)
{
        struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);
        int page_shift = 0;
        int npages;
        u32 offset = 0;
        int ncont = 0;
        int err;

        if (!ucmd->buf_addr)
                return -EINVAL;

        rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
        if (IS_ERR(rwq->umem)) {
                mlx5_ib_dbg(dev, "umem_get failed\n");
                err = PTR_ERR(rwq->umem);
                return err;
        }

        mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
                           &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
                                     &rwq->rq_page_offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        rwq->rq_num_pas = ncont;
        rwq->page_shift = page_shift;
        rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

        mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
                    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
                    npages, page_shift, ncont, offset);

        err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_umem;
        }

        return 0;

err_umem:
        ib_umem_release(rwq->umem);
        return err;
}

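/*
 * adjust_bfregn() translates the driver's bfreg numbering (which counts
 * only the MLX5_NON_FP_BFREGS_PER_UAR non-fast-path slots of each UAR)
 * into the hardware numbering that counts all MLX5_BFREGS_PER_UAR slots;
 * e.g. with 2 non-FP slots out of 4 total, driver bfregn 5 maps to
 * 5 / 2 * 4 + 5 % 2 = 9.
 */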
static int adjust_bfregn(struct mlx5_ib_dev *dev,
                         struct mlx5_bfreg_info *bfregi, int bfregn)
{
        return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
                                bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}

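/*
 * _create_user_qp() resolves the doorbell UAR in one of three ways: the
 * user passed a raw UAR page index (MLX5_QP_FLAG_UAR_PAGE_INDEX), the
 * user passed a dynamically allocated bfreg index
 * (MLX5_QP_FLAG_BFREG_INDEX), or the kernel allocates a bfreg itself.
 */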
static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                           struct mlx5_ib_qp *qp, struct ib_udata *udata,
                           struct ib_qp_init_attr *attr, u32 **in,
                           struct mlx5_ib_create_qp_resp *resp, int *inlen,
                           struct mlx5_ib_qp_base *base,
                           struct mlx5_ib_create_qp *ucmd)
{
        struct mlx5_ib_ucontext *context;
        struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
        int page_shift = 0;
        int uar_index = 0;
        int npages;
        u32 offset = 0;
        int bfregn;
        int ncont = 0;
        __be64 *pas;
        void *qpc;
        int err;
        u16 uid;
        u32 uar_flags;

        context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
                                            ibucontext);
        uar_flags = qp->flags_en &
                    (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
        switch (uar_flags) {
        case MLX5_QP_FLAG_UAR_PAGE_INDEX:
                uar_index = ucmd->bfreg_index;
                bfregn = MLX5_IB_INVALID_BFREG;
                break;
        case MLX5_QP_FLAG_BFREG_INDEX:
                uar_index = bfregn_to_uar_index(dev, &context->bfregi,
                                                ucmd->bfreg_index, true);
                if (uar_index < 0)
                        return uar_index;
                bfregn = MLX5_IB_INVALID_BFREG;
                break;
        case 0:
                if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
                        return -EINVAL;
                bfregn = alloc_bfreg(dev, &context->bfregi);
                if (bfregn < 0)
                        return bfregn;
                break;
        default:
                return -EINVAL;
        }

        mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
        if (bfregn != MLX5_IB_INVALID_BFREG)
                uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
                                                false);

        qp->rq.offset = 0;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

        err = set_user_buf_size(dev, qp, ucmd, base, attr);
        if (err)
                goto err_bfreg;

        if (ucmd->buf_addr && ubuffer->buf_size) {
                ubuffer->buf_addr = ucmd->buf_addr;
                err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
                                       ubuffer->buf_size, &ubuffer->umem,
                                       &npages, &page_shift, &ncont, &offset);
                if (err)
                        goto err_bfreg;
        } else {
                ubuffer->umem = NULL;
        }

        *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
        *in = kvzalloc(*inlen, GFP_KERNEL);
        if (!*in) {
                err = -ENOMEM;
                goto err_umem;
        }

        uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
        MLX5_SET(create_qp_in, *in, uid, uid);
        pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
        if (ubuffer->umem)
                mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

        MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(qpc, qpc, page_offset, offset);

        MLX5_SET(qpc, qpc, uar_page, uar_index);
        if (bfregn != MLX5_IB_INVALID_BFREG)
                resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
        else
                resp->bfreg_index = MLX5_IB_INVALID_BFREG;
        qp->bfregn = bfregn;

        err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_free;
        }

        return 0;

err_free:
        kvfree(*in);

err_umem:
        ib_umem_release(ubuffer->umem);

err_bfreg:
        if (bfregn != MLX5_IB_INVALID_BFREG)
                mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
        return err;
}

static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                       struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
        struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);

        if (udata) {
                /* User QP */
                mlx5_ib_db_unmap_user(context, &qp->db);
                ib_umem_release(base->ubuffer.umem);

                /*
                 * Free only the BFREGs which are handled by the kernel.
                 * BFREGs of UARs allocated dynamically are handled by user.
                 */
                if (qp->bfregn != MLX5_IB_INVALID_BFREG)
                        mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
                return;
        }

        /* Kernel QP */
        kvfree(qp->sq.wqe_head);
        kvfree(qp->sq.w_list);
        kvfree(qp->sq.wrid);
        kvfree(qp->sq.wr_data);
        kvfree(qp->rq.wrid);
        if (qp->db.db)
                mlx5_db_free(dev->mdev, &qp->db);
        if (qp->buf.frags)
                mlx5_frag_buf_free(dev->mdev, &qp->buf);
}

static int _create_kernel_qp(struct mlx5_ib_dev *dev,
                             struct ib_qp_init_attr *init_attr,
                             struct mlx5_ib_qp *qp, u32 **in, int *inlen,
                             struct mlx5_ib_qp_base *base)
{
        int uar_index;
        void *qpc;
        int err;

        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
                qp->bf.bfreg = &dev->fp_bfreg;
        else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
                qp->bf.bfreg = &dev->wc_bfreg;
        else
                qp->bf.bfreg = &dev->bfreg;

        /* We need to divide by two since each register consists of
         * two buffers of identical size, namely odd and even
         */
        qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
        uar_index = qp->bf.bfreg->index;

        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

        err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
                                       &qp->buf, dev->mdev->priv.numa_node);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        if (qp->rq.wqe_cnt)
                mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
                              ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);

        if (qp->sq.wqe_cnt) {
                int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
                                        MLX5_SEND_WQE_BB;
                mlx5_init_fbc_offset(qp->buf.frags +
                                     (qp->sq.offset / PAGE_SIZE),
                                     ilog2(MLX5_SEND_WQE_BB),
                                     ilog2(qp->sq.wqe_cnt),
                                     sq_strides_offset, &qp->sq.fbc);

                qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
        }

        *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
        *in = kvzalloc(*inlen, GFP_KERNEL);
        if (!*in) {
                err = -ENOMEM;
                goto err_buf;
        }

        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
        MLX5_SET(qpc, qpc, uar_page, uar_index);
        MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        /* Set "fast registration enabled" for all kernel QPs */
        MLX5_SET(qpc, qpc, fre, 1);
        MLX5_SET(qpc, qpc, rlky, 1);

        if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
                MLX5_SET(qpc, qpc, deth_sqpn, 1);

        mlx5_fill_page_frag_array(&qp->buf,
                                  (__be64 *)MLX5_ADDR_OF(create_qp_in,
                                                         *in, pas));

        err = mlx5_db_alloc(dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
        }

        qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
                                     sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
                                        sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
                                     sizeof(*qp->rq.wrid), GFP_KERNEL);
        qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
                                       sizeof(*qp->sq.w_list), GFP_KERNEL);
        qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
                                         sizeof(*qp->sq.wqe_head), GFP_KERNEL);

        if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
            !qp->sq.w_list || !qp->sq.wqe_head) {
                err = -ENOMEM;
                goto err_wrid;
        }

        return 0;

err_wrid:
        kvfree(qp->sq.wqe_head);
        kvfree(qp->sq.w_list);
        kvfree(qp->sq.wrid);
        kvfree(qp->sq.wr_data);
        kvfree(qp->rq.wrid);
        mlx5_db_free(dev->mdev, &qp->db);

err_free:
        kvfree(*in);

err_buf:
        mlx5_frag_buf_free(dev->mdev, &qp->buf);
        return err;
}

static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
        if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
            (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
                return MLX5_SRQ_RQ;
        else if (!qp->has_rq)
                return MLX5_ZERO_LEN_RQ;

        return MLX5_NON_ZERO_RQ;
}

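/*
 * For raw packet QPs the send side is a TIS (transport interface send)
 * plus an SQ; the TIS binds the SQ to a transport domain and, for
 * source-QPN QPs, to the underlay QPN.
 */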
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_qp *qp,
                                    struct mlx5_ib_sq *sq, u32 tdn,
                                    struct ib_pd *pd)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
        MLX5_SET(tisc, tisc, transport_domain, tdn);
        if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
                MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);

        return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
                                      struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
        mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}

static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
{
        if (sq->flow_rule)
                mlx5_del_flow_rules(sq->flow_rule);
        sq->flow_rule = NULL;
}

static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                   struct ib_udata *udata,
                                   struct mlx5_ib_sq *sq, void *qpin,
                                   struct ib_pd *pd)
{
        struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
        __be64 *pas;
        void *in;
        void *sqc;
        void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
        void *wq;
        int inlen;
        int err;
        int page_shift = 0;
        int npages;
        int ncont = 0;
        u32 offset = 0;

        err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
                               &sq->ubuffer.umem, &npages, &page_shift, &ncont,
                               &offset);
        if (err)
                return err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_umem;
        }

        MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);
        if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
                MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
        MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
        MLX5_SET(sqc, sqc, tis_lst_sz, 1);
        MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, swp))
                MLX5_SET(sqc, sqc, allow_swp, 1);

        wq = MLX5_ADDR_OF(sqc, sqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
        MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(wq, wq, page_offset, offset);

        pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
        mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

        err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);

        kvfree(in);

        if (err)
                goto err_umem;

        return 0;

err_umem:
        ib_umem_release(sq->ubuffer.umem);
        sq->ubuffer.umem = NULL;

        return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_sq *sq)
{
        destroy_flow_rule_vport_sq(sq);
        mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
        ib_umem_release(sq->ubuffer.umem);
}

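/*
 * Units in get_rq_pas_size(): log_page_size is biased by 12
 * (MLX5_ADAPTER_PAGE_SHIFT) and log_rq_stride by 4, matching how they are
 * packed into the QPC elsewhere in this file; page_offset is expressed in
 * 64ths of a page, hence po_quanta = page_size / 64.
 */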
static size_t get_rq_pas_size(void *qpc)
{
        u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
        u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
        u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
        u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_rq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}

static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
                                   struct mlx5_ib_rq *rq, void *qpin,
                                   size_t qpinlen, struct ib_pd *pd)
{
        struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
        __be64 *pas;
        __be64 *qp_pas;
        void *in;
        void *rqc;
        void *wq;
        void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
        size_t rq_pas_size = get_rq_pas_size(qpc);
        size_t inlen;
        int err;

        if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
                return -EINVAL;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
                MLX5_SET(rqc, rqc, vsd, 1);
        MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, flush_in_error_en, 1);
        MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
        MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

        if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
                MLX5_SET(rqc, rqc, scatter_fcs, 1);

        wq = MLX5_ADDR_OF(rqc, rqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
                MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
        MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
        MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
        MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

        pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
        qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
        memcpy(pas, qp_pas, rq_pas_size);

        err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);

        kvfree(in);

        return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_rq *rq)
{
        mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
                                      struct mlx5_ib_rq *rq,
                                      u32 qp_flags_en,
                                      struct ib_pd *pd)
{
        if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
                           MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
                mlx5_ib_disable_lb(dev, false, true);
        mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
}

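/*
 * The receive side of a raw packet QP is a TIR (transport interface
 * receive) dispatching directly to the inline RQ; the self_lb_block bits
 * opt the TIR out of unicast/multicast self-loopback, which is forced on
 * for representor devices.
 */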
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_rq *rq, u32 tdn,
                                    u32 *qp_flags_en, struct ib_pd *pd,
                                    u32 *out)
{
        u8 lb_flag = 0;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
        MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
        MLX5_SET(tirc, tirc, transport_domain, tdn);
        if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
                MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

        if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

        if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

        if (dev->is_rep) {
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
                *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
        }

        MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
        MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
        err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
        rq->tirn = MLX5_GET(create_tir_out, out, tirn);
        if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
                err = mlx5_ib_enable_lb(dev, false, true);

                if (err)
                        destroy_raw_packet_qp_tir(dev, rq, 0, pd);
        }
        kvfree(in);

        return err;
}

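/*
 * A raw packet QP is modeled as separate firmware objects: SQ + TIS on
 * the send side and RQ + TIR on the receive side, created below from the
 * same create_qp_in blob that a regular QP would use.
 */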
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                u32 *in, size_t inlen,
                                struct ib_pd *pd,
                                struct ib_udata *udata,
                                struct mlx5_ib_create_qp_resp *resp)
{
        struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
        struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);
        int err;
        u32 tdn = mucontext->tdn;
        u16 uid = to_mpd(pd)->uid;
        u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};

        if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
                return -EINVAL;
        if (qp->sq.wqe_cnt) {
                err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
                if (err)
                        return err;

                err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
                if (err)
                        goto err_destroy_tis;

                if (uid) {
                        resp->tisn = sq->tisn;
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
                        resp->sqn = sq->base.mqp.qpn;
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
                }

                sq->base.container_mibqp = qp;
                sq->base.mqp.event = mlx5_ib_qp_event;
        }

        if (qp->rq.wqe_cnt) {
                rq->base.container_mibqp = qp;

                if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
                        rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
                if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
                        rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
                err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
                if (err)
                        goto err_destroy_sq;

                err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
                                               out);
                if (err)
                        goto err_destroy_rq;

                if (uid) {
                        resp->rqn = rq->base.mqp.qpn;
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
                        resp->tirn = rq->tirn;
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
                        if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
                                resp->tir_icm_addr = MLX5_GET(
                                        create_tir_out, out, icm_address_31_0);
                                resp->tir_icm_addr |=
                                        (u64)MLX5_GET(create_tir_out, out,
                                                      icm_address_39_32)
                                        << 32;
                                resp->tir_icm_addr |=
                                        (u64)MLX5_GET(create_tir_out, out,
                                                      icm_address_63_40)
                                        << 40;
                                resp->comp_mask |=
                                        MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
                        }
                }
        }

        qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1497                                                      rq->base.mqp.qpn;
1498         return 0;
1499
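/*
 * Error unwind runs in reverse creation order; the SQ and its TIS exist
 * only when an SQ was requested, hence the wqe_cnt check below.
 */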
1500 err_destroy_rq:
1501         destroy_raw_packet_qp_rq(dev, rq);
1502 err_destroy_sq:
1503         if (!qp->sq.wqe_cnt)
1504                 return err;
1505         destroy_raw_packet_qp_sq(dev, sq);
1506 err_destroy_tis:
1507         destroy_raw_packet_qp_tis(dev, sq, pd);
1508
1509         return err;
1510 }
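
/*
 * Illustrative sketch (hypothetical demo_* helper, not used by the driver):
 * how the 64-bit TIR ICM address reported to userspace above is assembled
 * from the three create_tir_out fields, whose names encode their bit ranges.
 */
static inline u64 demo_assemble_tir_icm_addr(u32 addr_31_0, u8 addr_39_32,
                                             u32 addr_63_40)
{
        u64 icm = addr_31_0;

        icm |= (u64)addr_39_32 << 32;   /* bits 39..32 */
        icm |= (u64)addr_63_40 << 40;   /* bits 63..40 */
        return icm;
}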
1511
1512 static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
1513                                   struct mlx5_ib_qp *qp)
1514 {
1515         struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1516         struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1517         struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1518
1519         if (qp->rq.wqe_cnt) {
1520                 destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
1521                 destroy_raw_packet_qp_rq(dev, rq);
1522         }
1523
1524         if (qp->sq.wqe_cnt) {
1525                 destroy_raw_packet_qp_sq(dev, sq);
1526                 destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
1527         }
1528 }
1529
1530 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
1531                                     struct mlx5_ib_raw_packet_qp *raw_packet_qp)
1532 {
1533         struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1534         struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1535
1536         sq->sq = &qp->sq;
1537         rq->rq = &qp->rq;
1538         sq->doorbell = &qp->db;
1539         rq->doorbell = &qp->db;
1540 }
1541
1542 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1543 {
1544         if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1545                             MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1546                 mlx5_ib_disable_lb(dev, false, true);
1547         mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1548                              to_mpd(qp->ibqp.pd)->uid);
1549 }
1550
1551 struct mlx5_create_qp_params {
1552         struct ib_udata *udata;
1553         size_t inlen;
1554         size_t outlen;
1555         size_t ucmd_size;
1556         void *ucmd;
1557         u8 is_rss_raw : 1;
1558         struct ib_qp_init_attr *attr;
1559         u32 uidx;
1560         struct mlx5_ib_create_qp_resp resp;
1561 };
1562
1563 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1564                                  struct mlx5_ib_qp *qp,
1565                                  struct mlx5_create_qp_params *params)
1566 {
1567         struct ib_qp_init_attr *init_attr = params->attr;
1568         struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
1569         struct ib_udata *udata = params->udata;
1570         struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1571                 udata, struct mlx5_ib_ucontext, ibucontext);
1572         int inlen;
1573         int outlen;
1574         int err;
1575         u32 *in;
1576         u32 *out;
1577         void *tirc;
1578         void *hfso;
1579         u32 selected_fields = 0;
1580         u32 outer_l4;
1581         u32 tdn = mucontext->tdn;
1582         u8 lb_flag = 0;
1583
1584         if (ucmd->comp_mask) {
1585                 mlx5_ib_dbg(dev, "invalid comp mask\n");
1586                 return -EOPNOTSUPP;
1587         }
1588
1589         if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1590             !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1591                 mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
1592                 return -EOPNOTSUPP;
1593         }
1594
1595         if (dev->is_rep)
1596                 qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1597
1598         if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1599                 lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1600
1601         if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1602                 lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1603
1604         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1605         outlen = MLX5_ST_SZ_BYTES(create_tir_out);
1606         in = kvzalloc(inlen + outlen, GFP_KERNEL);
1607         if (!in)
1608                 return -ENOMEM;
1609
1610         out = in + MLX5_ST_SZ_DW(create_tir_in);
1611         MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1612         tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1613         MLX5_SET(tirc, tirc, disp_type,
1614                  MLX5_TIRC_DISP_TYPE_INDIRECT);
1615         MLX5_SET(tirc, tirc, indirect_table,
1616                  init_attr->rwq_ind_tbl->ind_tbl_num);
1617         MLX5_SET(tirc, tirc, transport_domain, tdn);
1618
1619         hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1620
1621         if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1622                 MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1623
1624         MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1625
1626         if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1627                 hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
1628         else
1629                 hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1630
1631         switch (ucmd->rx_hash_function) {
1632         case MLX5_RX_HASH_FUNC_TOEPLITZ:
1633         {
1634                 void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
1635                 size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
1636
1637                 if (len != ucmd->rx_key_len) {
1638                         err = -EINVAL;
1639                         goto err;
1640                 }
1641
1642                 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1643                 memcpy(rss_key, ucmd->rx_hash_key, len);
1644                 break;
1645         }
1646         default:
1647                 err = -EOPNOTSUPP;
1648                 goto err;
1649         }
1650
1651         if (!ucmd->rx_hash_fields_mask) {
1652                 /* Special case: this TIR serves as a steering entry without hashing */
1653                 if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
1654                         goto create_tir;
1655                 err = -EINVAL;
1656                 goto err;
1657         }
1658
1659         if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1660              (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1661              ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1662              (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1663                 err = -EINVAL;
1664                 goto err;
1665         }
1666
1667         /* If none of the IPv4 & IPv6 SRC/DST fields is set, this bit field is ignored */
1668         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1669             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1670                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1671                          MLX5_L3_PROT_TYPE_IPV4);
1672         else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1673                  (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1674                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1675                          MLX5_L3_PROT_TYPE_IPV6);
1676
1677         outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1678                     (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1679                            << 0 |
1680                    ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1681                     (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1682                            << 1 |
1683                    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1684
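        /*
         * outer_l4 collects one indicator per L4 selection: bit 0 for TCP
         * ports, bit 1 for UDP ports, plus a (shifted) IPSEC SPI flag.
         * The x & (x - 1) test below clears the lowest set bit, so it is
         * nonzero exactly when more than one protocol was selected.
         */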
1685         /* Check that only one l4 protocol is set */
1686         if (outer_l4 & (outer_l4 - 1)) {
1687                 err = -EINVAL;
1688                 goto err;
1689         }
1690
1691         /* If none of the TCP & UDP SRC/DST fields is set, this bit field is ignored */
1692         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1693             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1694                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1695                          MLX5_L4_PROT_TYPE_TCP);
1696         else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1697                  (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1698                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1699                          MLX5_L4_PROT_TYPE_UDP);
1700
1701         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1702             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1703                 selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
1704
1705         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1706             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1707                 selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
1708
1709         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1710             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1711                 selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
1712
1713         if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1714             (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1715                 selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
1716
1717         if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
1718                 selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
1719
1720         MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
1721
1722 create_tir:
1723         MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
1724         err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
1725
1726         qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
1727         if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1728                 err = mlx5_ib_enable_lb(dev, false, true);
1729
1730                 if (err)
1731                         mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1732                                              to_mpd(pd)->uid);
1733         }
1734
1735         if (err)
1736                 goto err;
1737
1738         if (mucontext->devx_uid) {
1739                 params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1740                 params->resp.tirn = qp->rss_qp.tirn;
1741                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1742                         params->resp.tir_icm_addr =
1743                                 MLX5_GET(create_tir_out, out, icm_address_31_0);
1744                         params->resp.tir_icm_addr |=
1745                                 (u64)MLX5_GET(create_tir_out, out,
1746                                               icm_address_39_32)
1747                                 << 32;
1748                         params->resp.tir_icm_addr |=
1749                                 (u64)MLX5_GET(create_tir_out, out,
1750                                               icm_address_63_40)
1751                                 << 40;
1752                         params->resp.comp_mask |=
1753                                 MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1754                 }
1755         }
1756
1757         kvfree(in);
1758         /* No QPN is allocated for an RSS QP; 0 is the reserved placeholder */
1759         qp->trans_qp.base.mqp.qpn = 0;
1760         qp->is_rss = true;
1761         return 0;
1762
1763 err:
1764         kvfree(in);
1765         return err;
1766 }
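
/*
 * A minimal sketch of the single-L4-protocol rule enforced above, packaged
 * as a hypothetical demo_* helper (not part of the driver), assuming the
 * MLX5_RX_HASH_* bits from the uapi header. Returns true iff the mask
 * selects more than one L4 protocol.
 */
static inline bool demo_rx_hash_l4_conflict(u64 fields_mask)
{
        u32 l4 = 0;

        if (fields_mask & (MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP))
                l4 |= BIT(0);
        if (fields_mask & (MLX5_RX_HASH_SRC_PORT_UDP | MLX5_RX_HASH_DST_PORT_UDP))
                l4 |= BIT(1);
        if (fields_mask & MLX5_RX_HASH_IPSEC_SPI)
                l4 |= BIT(2);

        /* x & (x - 1) clears the lowest set bit: nonzero iff >= 2 bits set */
        return l4 & (l4 - 1);
}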
1767
1768 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
1769                                          struct ib_qp_init_attr *init_attr,
1770                                          struct mlx5_ib_create_qp *ucmd,
1771                                          void *qpc)
1772 {
1773         int scqe_sz;
1774         bool allow_scat_cqe = false;
1775
1776         if (ucmd)
1777                 allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
1778
1779         if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
1780                 return;
1781
1782         scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
1783         if (scqe_sz == 128) {
1784                 MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
1785                 return;
1786         }
1787
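        /*
         * A 64B scatter CQE was handled above for any QP type; 32B requester
         * scatter on a DCI additionally requires the dc_req_scat_data_cqe
         * capability, which is what the condition below encodes.
         */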
1788         if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
1789             MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
1790                 MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
1791 }
1792
1793 static int atomic_size_to_mode(int size_mask)
1794 {
1795         /* The driver does not support atomic sizes > 256B and does not
1796          * know how to translate bigger sizes.
1797          */
1798         int supported_size_mask = size_mask & 0x1ff;
1799         int log_max_size;
1800
1801         if (!supported_size_mask)
1802                 return -EOPNOTSUPP;
1803
1804         log_max_size = __fls(supported_size_mask);
1805
1806         if (log_max_size > 3)
1807                 return log_max_size;
1808
1809         return MLX5_ATOMIC_MODE_8B;
1810 }
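
/*
 * Worked example: a size_mask of 0xc8 advertises 8B (bit 3), 64B (bit 6)
 * and 128B (bit 7) atomics; __fls(0xc8) = 7, so the returned mode is the
 * log2 of the largest supported size. Masks whose highest bit is below
 * bit 3 fall back to MLX5_ATOMIC_MODE_8B.
 */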
1811
1812 static int get_atomic_mode(struct mlx5_ib_dev *dev,
1813                            enum ib_qp_type qp_type)
1814 {
1815         u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
1816         u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
1817         int atomic_mode = -EOPNOTSUPP;
1818         int atomic_size_mask;
1819
1820         if (!atomic)
1821                 return -EOPNOTSUPP;
1822
1823         if (qp_type == MLX5_IB_QPT_DCT)
1824                 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
1825         else
1826                 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
1827
1828         if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
1829             (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
1830                 atomic_mode = atomic_size_to_mode(atomic_size_mask);
1831
1832         if (atomic_mode <= 0 &&
1833             (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
1834              atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
1835                 atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
1836
1837         return atomic_mode;
1838 }
1839
1840 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1841                              struct mlx5_create_qp_params *params)
1842 {
1843         struct mlx5_ib_create_qp *ucmd = params->ucmd;
1844         struct ib_qp_init_attr *attr = params->attr;
1845         u32 uidx = params->uidx;
1846         struct mlx5_ib_resources *devr = &dev->devr;
1847         u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
1848         int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
1849         struct mlx5_core_dev *mdev = dev->mdev;
1850         struct mlx5_ib_qp_base *base;
1851         unsigned long flags;
1852         void *qpc;
1853         u32 *in;
1854         int err;
1855
1856         mutex_init(&qp->mutex);
1857
1858         if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
1859                 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
1860
1861         in = kvzalloc(inlen, GFP_KERNEL);
1862         if (!in)
1863                 return -ENOMEM;
1864
1865         if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
1866                 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
1867         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1868
1869         MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
1870         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
1871         MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
1872
1873         if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
1874                 MLX5_SET(qpc, qpc, block_lb_mc, 1);
1875         if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
1876                 MLX5_SET(qpc, qpc, cd_master, 1);
1877         if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
1878                 MLX5_SET(qpc, qpc, cd_slave_send, 1);
1879         if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
1880                 MLX5_SET(qpc, qpc, cd_slave_receive, 1);
1881
1882         MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
1883         MLX5_SET(qpc, qpc, no_sq, 1);
1884         MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
1885         MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
1886         MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
1887         MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
1888         MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
1889
1890         /* 0xffffff means we ask to work with cqe version 0 */
1891         if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
1892                 MLX5_SET(qpc, qpc, user_index, uidx);
1893
1894         if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
1895                 MLX5_SET(qpc, qpc, end_padding_mode,
1896                          MLX5_WQ_END_PAD_MODE_ALIGN);
1897                 /* Special case: clear the flag */
1898                 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
1899         }
1900
1901         base = &qp->trans_qp.base;
1902         err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
1903         kvfree(in);
1904         if (err)
1905                 return err;
1906
1907         base->container_mibqp = qp;
1908         base->mqp.event = mlx5_ib_qp_event;
1909         if (MLX5_CAP_GEN(mdev, ece_support))
1910                 params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
1911
1912         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
1913         list_add_tail(&qp->qps_list, &dev->qp_list);
1914         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1915
1916         qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
1917         return 0;
1918 }
1919
1920 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1921                           struct mlx5_ib_qp *qp,
1922                           struct mlx5_create_qp_params *params)
1923 {
1924         struct ib_qp_init_attr *init_attr = params->attr;
1925         struct mlx5_ib_create_qp *ucmd = params->ucmd;
1926         u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
1927         struct ib_udata *udata = params->udata;
1928         u32 uidx = params->uidx;
1929         struct mlx5_ib_resources *devr = &dev->devr;
1930         int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
1931         struct mlx5_core_dev *mdev = dev->mdev;
1932         struct mlx5_ib_cq *send_cq;
1933         struct mlx5_ib_cq *recv_cq;
1934         unsigned long flags;
1935         struct mlx5_ib_qp_base *base;
1936         int mlx5_st;
1937         void *qpc;
1938         u32 *in;
1939         int err;
1940
1941         mutex_init(&qp->mutex);
1942         spin_lock_init(&qp->sq.lock);
1943         spin_lock_init(&qp->rq.lock);
1944
1945         mlx5_st = to_mlx5_st(qp->type);
1946         if (mlx5_st < 0)
1947                 return -EINVAL;
1948
1949         if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
1950                 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
1951
1952         if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
1953                 qp->underlay_qpn = init_attr->source_qpn;
1954
1955         base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
1956                 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
1957                &qp->raw_packet_qp.rq.base :
1958                &qp->trans_qp.base;
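        /*
         * Raw packet and underlay (SOURCE_QPN) QPs are assembled from
         * separate SQ/RQ objects, so the QP is tracked through the RQ's
         * base; all other types use the ordinary transport QP base.
         */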
1959
1960         qp->has_rq = qp_has_rq(init_attr);
1961         err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
1962         if (err) {
1963                 mlx5_ib_dbg(dev, "err %d\n", err);
1964                 return err;
1965         }
1966
1967         if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
1968             ucmd->rq_wqe_count != qp->rq.wqe_cnt)
1969                 return -EINVAL;
1970
1971         if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
1972                 return -EINVAL;
1973
1974         err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
1975                               &inlen, base, ucmd);
1976         if (err)
1977                 return err;
1978
1979         if (is_sqp(init_attr->qp_type))
1980                 qp->port = init_attr->port_num;
1981
1982         if (MLX5_CAP_GEN(mdev, ece_support))
1983                 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
1984         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1985
1986         MLX5_SET(qpc, qpc, st, mlx5_st);
1987         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
1988         MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
1989
1990         if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
1991                 MLX5_SET(qpc, qpc, wq_signature, 1);
1992
1993         if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
1994                 MLX5_SET(qpc, qpc, block_lb_mc, 1);
1995
1996         if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
1997                 MLX5_SET(qpc, qpc, cd_master, 1);
1998         if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
1999                 MLX5_SET(qpc, qpc, cd_slave_send, 1);
2000         if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
2001                 MLX5_SET(qpc, qpc, cd_slave_receive, 1);
2002         if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
2003                 MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
2004         if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2005             (init_attr->qp_type == IB_QPT_RC ||
2006              init_attr->qp_type == IB_QPT_UC)) {
2007                 int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
2008
2009                 MLX5_SET(qpc, qpc, cs_res,
2010                          rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
2011                                           MLX5_RES_SCAT_DATA32_CQE);
2012         }
2013         if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2014             (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
2015                 configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
2016
2017         if (qp->rq.wqe_cnt) {
2018                 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2019                 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2020         }
2021
2022         MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2023
2024         if (qp->sq.wqe_cnt) {
2025                 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2026         } else {
2027                 MLX5_SET(qpc, qpc, no_sq, 1);
2028                 if (init_attr->srq &&
2029                     init_attr->srq->srq_type == IB_SRQT_TM)
2030                         MLX5_SET(qpc, qpc, offload_type,
2031                                  MLX5_QPC_OFFLOAD_TYPE_RNDV);
2032         }
2033
2034         /* Set default resources */
2035         switch (init_attr->qp_type) {
2036         case IB_QPT_XRC_INI:
2037                 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2038                 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2039                 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2040                 break;
2041         default:
2042                 if (init_attr->srq) {
2043                         MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
2044                         MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
2045                 } else {
2046                         MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2047                         MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
2048                 }
2049         }
2050
2051         if (init_attr->send_cq)
2052                 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
2053
2054         if (init_attr->recv_cq)
2055                 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
2056
2057         MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2058
2059         /* 0xffffff means we ask to work with cqe version 0 */
2060         if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2061                 MLX5_SET(qpc, qpc, user_index, uidx);
2062
2063         if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
2064             init_attr->qp_type != IB_QPT_RAW_PACKET) {
2065                 MLX5_SET(qpc, qpc, end_padding_mode,
2066                          MLX5_WQ_END_PAD_MODE_ALIGN);
2067                 /* Special case: clear the flag */
2068                 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2069         }
2070
2071         if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2072             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2073                 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
2074                 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
2075                 err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2076                                            &params->resp);
2077         } else
2078                 err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2079
2080         kvfree(in);
2081         if (err)
2082                 goto err_create;
2083
2084         base->container_mibqp = qp;
2085         base->mqp.event = mlx5_ib_qp_event;
2086         if (MLX5_CAP_GEN(mdev, ece_support))
2087                 params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
2088
2089         get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
2090                 &send_cq, &recv_cq);
2091         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2092         mlx5_ib_lock_cqs(send_cq, recv_cq);
2093         /* Maintain device-to-QP access; needed for further handling via the
2094          * reset flow.
2095          */
2096         list_add_tail(&qp->qps_list, &dev->qp_list);
2097         /* Maintain CQ-to-QP access; needed for further handling via the
2098          * reset flow. */
2099         if (send_cq)
2100                 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2101         if (recv_cq)
2102                 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2103         mlx5_ib_unlock_cqs(send_cq, recv_cq);
2104         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2105
2106         return 0;
2107
2108 err_create:
2109         destroy_qp(dev, qp, base, udata);
2110         return err;
2111 }
2112
2113 static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2114                             struct mlx5_ib_qp *qp,
2115                             struct mlx5_create_qp_params *params)
2116 {
2117         struct ib_qp_init_attr *attr = params->attr;
2118         u32 uidx = params->uidx;
2119         struct mlx5_ib_resources *devr = &dev->devr;
2120         u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
2121         int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2122         struct mlx5_core_dev *mdev = dev->mdev;
2123         struct mlx5_ib_cq *send_cq;
2124         struct mlx5_ib_cq *recv_cq;
2125         unsigned long flags;
2126         struct mlx5_ib_qp_base *base;
2127         int mlx5_st;
2128         void *qpc;
2129         u32 *in;
2130         int err;
2131
2132         mutex_init(&qp->mutex);
2133         spin_lock_init(&qp->sq.lock);
2134         spin_lock_init(&qp->rq.lock);
2135
2136         mlx5_st = to_mlx5_st(qp->type);
2137         if (mlx5_st < 0)
2138                 return -EINVAL;
2139
2140         if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2141                 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2142
2143         base = &qp->trans_qp.base;
2144
2145         qp->has_rq = qp_has_rq(attr);
2146         err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
2147         if (err) {
2148                 mlx5_ib_dbg(dev, "err %d\n", err);
2149                 return err;
2150         }
2151
2152         err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
2153         if (err)
2154                 return err;
2155
2156         if (is_sqp(attr->qp_type))
2157                 qp->port = attr->port_num;
2158
2159         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2160
2161         MLX5_SET(qpc, qpc, st, mlx5_st);
2162         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2163
2164         if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
2165                 MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
2166         else
2167                 MLX5_SET(qpc, qpc, latency_sensitive, 1);
2168
2170         if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2171                 MLX5_SET(qpc, qpc, block_lb_mc, 1);
2172
2173         if (qp->rq.wqe_cnt) {
2174                 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2175                 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2176         }
2177
2178         MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
2179
2180         if (qp->sq.wqe_cnt)
2181                 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2182         else
2183                 MLX5_SET(qpc, qpc, no_sq, 1);
2184
2185         if (attr->srq) {
2186                 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
2187                 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2188                          to_msrq(attr->srq)->msrq.srqn);
2189         } else {
2190                 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2191                 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2192                          to_msrq(devr->s1)->msrq.srqn);
2193         }
2194
2195         if (attr->send_cq)
2196                 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
2197
2198         if (attr->recv_cq)
2199                 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
2200
2201         MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2202
2203         /* 0xffffff means we ask to work with cqe version 0 */
2204         if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2205                 MLX5_SET(qpc, qpc, user_index, uidx);
2206
2207         /* We use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
2208         if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
2209                 MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
2210
2211         err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2212         kvfree(in);
2213         if (err)
2214                 goto err_create;
2215
2216         base->container_mibqp = qp;
2217         base->mqp.event = mlx5_ib_qp_event;
2218
2219         get_cqs(qp->type, attr->send_cq, attr->recv_cq,
2220                 &send_cq, &recv_cq);
2221         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2222         mlx5_ib_lock_cqs(send_cq, recv_cq);
2223         /* Maintain device-to-QP access; needed for further handling via the
2224          * reset flow.
2225          */
2226         list_add_tail(&qp->qps_list, &dev->qp_list);
2227         /* Maintain CQ-to-QP access; needed for further handling via the
2228          * reset flow. */
2229         if (send_cq)
2230                 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2231         if (recv_cq)
2232                 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2233         mlx5_ib_unlock_cqs(send_cq, recv_cq);
2234         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2235
2236         return 0;
2237
2238 err_create:
2239         destroy_qp(dev, qp, base, NULL);
2240         return err;
2241 }
2242
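/*
 * Lock send and receive CQs in a globally consistent order (ascending cqn)
 * so that two threads locking the same pair of CQs for different QPs cannot
 * deadlock. The __acquire()/__release() annotations keep sparse's lock
 * balance correct in the shared-CQ and missing-CQ cases.
 */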
2243 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2244         __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
2245 {
2246         if (send_cq) {
2247                 if (recv_cq) {
2248                         if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2249                                 spin_lock(&send_cq->lock);
2250                                 spin_lock_nested(&recv_cq->lock,
2251                                                  SINGLE_DEPTH_NESTING);
2252                         } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2253                                 spin_lock(&send_cq->lock);
2254                                 __acquire(&recv_cq->lock);
2255                         } else {
2256                                 spin_lock(&recv_cq->lock);
2257                                 spin_lock_nested(&send_cq->lock,
2258                                                  SINGLE_DEPTH_NESTING);
2259                         }
2260                 } else {
2261                         spin_lock(&send_cq->lock);
2262                         __acquire(&recv_cq->lock);
2263                 }
2264         } else if (recv_cq) {
2265                 spin_lock(&recv_cq->lock);
2266                 __acquire(&send_cq->lock);
2267         } else {
2268                 __acquire(&send_cq->lock);
2269                 __acquire(&recv_cq->lock);
2270         }
2271 }
2272
2273 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2274         __releases(&send_cq->lock) __releases(&recv_cq->lock)
2275 {
2276         if (send_cq) {
2277                 if (recv_cq) {
2278                         if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2279                                 spin_unlock(&recv_cq->lock);
2280                                 spin_unlock(&send_cq->lock);
2281                         } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2282                                 __release(&recv_cq->lock);
2283                                 spin_unlock(&send_cq->lock);
2284                         } else {
2285                                 spin_unlock(&send_cq->lock);
2286                                 spin_unlock(&recv_cq->lock);
2287                         }
2288                 } else {
2289                         __release(&recv_cq->lock);
2290                         spin_unlock(&send_cq->lock);
2291                 }
2292         } else if (recv_cq) {
2293                 __release(&send_cq->lock);
2294                 spin_unlock(&recv_cq->lock);
2295         } else {
2296                 __release(&recv_cq->lock);
2297                 __release(&send_cq->lock);
2298         }
2299 }
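
/*
 * A generic sketch of the same deadlock-avoidance idea for any two spinlocks
 * with stable, distinct ids (hypothetical demo_* helper, not used by the
 * driver; unlike mlx5_ib_lock_cqs() it does not handle the equal-id or
 * missing-lock cases).
 */
static inline void demo_lock_pair_ordered(spinlock_t *a, u32 a_id,
                                          spinlock_t *b, u32 b_id)
{
        if (a_id < b_id) {
                spin_lock(a);
                spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(b);
                spin_lock_nested(a, SINGLE_DEPTH_NESTING);
        }
}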
2300
2301 static void get_cqs(enum ib_qp_type qp_type,
2302                     struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
2303                     struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
2304 {
2305         switch (qp_type) {
2306         case IB_QPT_XRC_TGT:
2307                 *send_cq = NULL;
2308                 *recv_cq = NULL;
2309                 break;
2310         case MLX5_IB_QPT_REG_UMR:
2311         case IB_QPT_XRC_INI:
2312                 *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2313                 *recv_cq = NULL;
2314                 break;
2315
2316         case IB_QPT_SMI:
2317         case MLX5_IB_QPT_HW_GSI:
2318         case IB_QPT_RC:
2319         case IB_QPT_UC:
2320         case IB_QPT_UD:
2321         case IB_QPT_RAW_PACKET:
2322                 *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2323                 *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
2324                 break;
2325         default:
2326                 *send_cq = NULL;
2327                 *recv_cq = NULL;
2328                 break;
2329         }
2330 }
2331
2332 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2333                                 const struct mlx5_modify_raw_qp_param *raw_qp_param,
2334                                 u8 lag_tx_affinity);
2335
2336 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2337                               struct ib_udata *udata)
2338 {
2339         struct mlx5_ib_cq *send_cq, *recv_cq;
2340         struct mlx5_ib_qp_base *base;
2341         unsigned long flags;
2342         int err;
2343
2344         if (qp->is_rss) {
2345                 destroy_rss_raw_qp_tir(dev, qp);
2346                 return;
2347         }
2348
2349         base = (qp->type == IB_QPT_RAW_PACKET ||
2350                 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
2351                        &qp->raw_packet_qp.rq.base :
2352                        &qp->trans_qp.base;
2353
2354         if (qp->state != IB_QPS_RESET) {
2355                 if (qp->type != IB_QPT_RAW_PACKET &&
2356                     !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
2357                         err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
2358                                                   NULL, &base->mqp, NULL);
2359                 } else {
2360                         struct mlx5_modify_raw_qp_param raw_qp_param = {
2361                                 .operation = MLX5_CMD_OP_2RST_QP
2362                         };
2363
2364                         err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
2365                 }
2366                 if (err)
2367                         mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2368                                      base->mqp.qpn);
2369         }
2370
2371         get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
2372                 &recv_cq);
2373
2374         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2375         mlx5_ib_lock_cqs(send_cq, recv_cq);
2376         /* del from lists under both locks above to protect reset flow paths */
2377         list_del(&qp->qps_list);
2378         if (send_cq)
2379                 list_del(&qp->cq_send_list);
2380
2381         if (recv_cq)
2382                 list_del(&qp->cq_recv_list);
2383
2384         if (!udata) {
2385                 __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2386                                    qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
2387                 if (send_cq != recv_cq)
2388                         __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
2389                                            NULL);
2390         }
2391         mlx5_ib_unlock_cqs(send_cq, recv_cq);
2392         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2393
2394         if (qp->type == IB_QPT_RAW_PACKET ||
2395             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2396                 destroy_raw_packet_qp(dev, qp);
2397         } else {
2398                 err = mlx5_core_destroy_qp(dev, &base->mqp);
2399                 if (err)
2400                         mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
2401                                      base->mqp.qpn);
2402         }
2403
2404         destroy_qp(dev, qp, base, udata);
2405 }
2406
2407 static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2408                       struct mlx5_ib_qp *qp,
2409                       struct mlx5_create_qp_params *params)
2410 {
2411         struct ib_qp_init_attr *attr = params->attr;
2412         struct mlx5_ib_create_qp *ucmd = params->ucmd;
2413         u32 uidx = params->uidx;
2414         void *dctc;
2415
2416         qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2417         if (!qp->dct.in)
2418                 return -ENOMEM;
2419
2420         MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2421         dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2422         MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
2423         MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
2424         MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
2425         MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2426         MLX5_SET(dctc, dctc, user_index, uidx);
2427         if (MLX5_CAP_GEN(dev->mdev, ece_support))
2428                 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
2429
2430         if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
2431                 int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
2432
2433                 if (rcqe_sz == 128)
2434                         MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
2435         }
2436
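        /*
         * Only the create_dct_in mailbox is staged at this point; as the
         * RESET state below suggests, the firmware DCT object itself is
         * created later, in the modify-QP flow.
         */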
2437         qp->state = IB_QPS_RESET;
2438
2439         return 0;
2440 }
2441
2442 static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
2443                          enum ib_qp_type *type)
2444 {
2445         if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
2446                 goto out;
2447
2448         switch (attr->qp_type) {
2449         case IB_QPT_XRC_TGT:
2450         case IB_QPT_XRC_INI:
2451                 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2452                         goto out;
2453                 fallthrough;
2454         case IB_QPT_RC:
2455         case IB_QPT_UC:
2456         case IB_QPT_SMI:
2457         case MLX5_IB_QPT_HW_GSI:
2458         case IB_QPT_DRIVER:
2459         case IB_QPT_GSI:
2460                 if (dev->profile == &raw_eth_profile)
2461                         goto out;
                      fallthrough;
2462         case IB_QPT_RAW_PACKET:
2463         case IB_QPT_UD:
2464         case MLX5_IB_QPT_REG_UMR:
2465                 break;
2466         default:
2467                 goto out;
2468         }
2469
2470         *type = attr->qp_type;
2471         return 0;
2472
2473 out:
2474         mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
2475         return -EOPNOTSUPP;
2476 }
2477
2478 static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2479                             struct ib_qp_init_attr *attr,
2480                             struct ib_udata *udata)
2481 {
2482         struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2483                 udata, struct mlx5_ib_ucontext, ibucontext);
2484
2485         if (!udata) {
2486                 /* Kernel create_qp callers */
2487                 if (attr->rwq_ind_tbl)
2488                         return -EOPNOTSUPP;
2489
2490                 switch (attr->qp_type) {
2491                 case IB_QPT_RAW_PACKET:
2492                 case IB_QPT_DRIVER:
2493                         return -EOPNOTSUPP;
2494                 default:
2495                         return 0;
2496                 }
2497         }
2498
2499         /* Userspace create_qp callers */
2500         if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
2501                 mlx5_ib_dbg(dev,
2502                         "Raw Packet QP is only supported for CQE version > 0\n");
2503                 return -EINVAL;
2504         }
2505
2506         if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
2507                 mlx5_ib_dbg(dev,
2508                             "Wrong QP type %d for the RWQ indirect table\n",
2509                             attr->qp_type);
2510                 return -EINVAL;
2511         }
2512
2513         switch (attr->qp_type) {
2514         case IB_QPT_SMI:
2515         case MLX5_IB_QPT_HW_GSI:
2516         case MLX5_IB_QPT_REG_UMR:
2517         case IB_QPT_GSI:
2518                 mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
2519                             attr->qp_type);
2520                 return -EINVAL;
2521         default:
2522                 break;
2523         }
2524
2525         /*
2526          * We don't need to see this warning; it means that kernel code is
2527          * missing an ib_pd. Placed here to catch developers' mistakes.
2528          */
2529         WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
2530                   "There is a missing PD pointer assignment\n");
2531         return 0;
2532 }
2533
2534 static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
2535                                 bool cond, struct mlx5_ib_qp *qp)
2536 {
2537         if (!(*flags & flag))
2538                 return;
2539
2540         if (cond) {
2541                 qp->flags_en |= flag;
2542                 *flags &= ~flag;
2543                 return;
2544         }
2545
2546         if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
2547                 /*
2548                  * We don't return an error if this flag was provided but
2549                  * mlx5 doesn't have the right capability.
2550                  */
2551                 *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
2552                 return;
2553         }
2554         mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
2555 }
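
/*
 * Worked example: with flags = SIGNATURE | SCATTER_CQE on hardware that
 * lacks sctr_data_cqe, SIGNATURE is consumed into qp->flags_en, SCATTER_CQE
 * is silently dropped by the special case above, and the caller is left
 * with flags == 0, i.e. success.
 */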
2556
2557 static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2558                                 void *ucmd, struct ib_qp_init_attr *attr)
2559 {
2560         struct mlx5_core_dev *mdev = dev->mdev;
2561         bool cond;
2562         int flags;
2563
2564         if (attr->rwq_ind_tbl)
2565                 flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
2566         else
2567                 flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
2568
2569         switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
2570         case MLX5_QP_FLAG_TYPE_DCI:
2571                 qp->type = MLX5_IB_QPT_DCI;
2572                 break;
2573         case MLX5_QP_FLAG_TYPE_DCT:
2574                 qp->type = MLX5_IB_QPT_DCT;
2575                 break;
2576         default:
2577                 if (qp->type != IB_QPT_DRIVER)
2578                         break;
2579                 /*
2580                  * It is IB_QPT_DRIVER and either no subtype or a wrong
2581                  * subtype was provided.
2582                  */
2583                 return -EINVAL;
2584         }
2585
2586         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
2587         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
2588
2589         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
2590         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
2591                             MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2592
2593         if (qp->type == IB_QPT_RAW_PACKET) {
2594                 cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
2595                        MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
2596                        MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
2597                 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
2598                                     cond, qp);
2599                 process_vendor_flag(dev, &flags,
2600                                     MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
2601                                     qp);
2602                 process_vendor_flag(dev, &flags,
2603                                     MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
2604                                     qp);
2605         }
2606
2607         if (qp->type == IB_QPT_RC)
2608                 process_vendor_flag(dev, &flags,
2609                                     MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
2610                                     MLX5_CAP_GEN(mdev, qp_packet_based), qp);
2611
2612         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
2613         process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
2614
2615         cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
2616                                 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
2617                                 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
2618         if (attr->rwq_ind_tbl && cond) {
2619                 mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
2620                             cond);
2621                 return -EINVAL;
2622         }
2623
2624         if (flags)
2625                 mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
2626
2627         return (flags) ? -EINVAL : 0;
2628 }
2629
2630 static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
2631                                 bool cond, struct mlx5_ib_qp *qp)
2632 {
2633         if (!(*flags & flag))
2634                 return;
2635
2636         if (cond) {
2637                 qp->flags |= flag;
2638                 *flags &= ~flag;
2639                 return;
2640         }
2641
2642         if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
2643                 /*
2644                  * Special case: if the condition isn't met, it's not an
2645                  * error, just a different in-kernel flow.
2646                  */
2647                 *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
2648                 return;
2649         }
2650         mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
2651 }
2652
2653 static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2654                                 struct ib_qp_init_attr *attr)
2655 {
2656         enum ib_qp_type qp_type = qp->type;
2657         struct mlx5_core_dev *mdev = dev->mdev;
2658         int create_flags = attr->create_flags;
2659         bool cond;
2660
2661         if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
2662                 if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
2663                         return -EINVAL;
2664
2665         if (qp_type == MLX5_IB_QPT_DCT)
2666                 return (create_flags) ? -EINVAL : 0;
2667
2668         if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
2669                 return (create_flags) ? -EINVAL : 0;
2670
2671         process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
2672                             mlx5_get_flow_namespace(dev->mdev,
2673                                                     MLX5_FLOW_NAMESPACE_BYPASS),
2674                             qp);
2675         process_create_flag(dev, &create_flags,
2676                             IB_QP_CREATE_INTEGRITY_EN,
2677                             MLX5_CAP_GEN(mdev, sho), qp);
2678         process_create_flag(dev, &create_flags,
2679                             IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
2680                             MLX5_CAP_GEN(mdev, block_lb_mc), qp);
2681         process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
2682                             MLX5_CAP_GEN(mdev, cd), qp);
2683         process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
2684                             MLX5_CAP_GEN(mdev, cd), qp);
2685         process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
2686                             MLX5_CAP_GEN(mdev, cd), qp);
2687
2688         if (qp_type == IB_QPT_UD) {
2689                 process_create_flag(dev, &create_flags,
2690                                     IB_QP_CREATE_IPOIB_UD_LSO,
2691                                     MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
2692                                     qp);
2693                 cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
2694                 process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
2695                                     cond, qp);
2696         }
2697
2698         if (qp_type == IB_QPT_RAW_PACKET) {
2699                 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
2700                        MLX5_CAP_ETH(mdev, scatter_fcs);
2701                 process_create_flag(dev, &create_flags,
2702                                     IB_QP_CREATE_SCATTER_FCS, cond, qp);
2703
2704                 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
2705                        MLX5_CAP_ETH(mdev, vlan_cap);
2706                 process_create_flag(dev, &create_flags,
2707                                     IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
2708         }
2709
2710         process_create_flag(dev, &create_flags,
2711                             IB_QP_CREATE_PCI_WRITE_END_PADDING,
2712                             MLX5_CAP_GEN(mdev, end_pad), qp);
2713
2714         process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
2715                             qp_type != MLX5_IB_QPT_REG_UMR, qp);
2716         process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
2717                             true, qp);
2718
2719         if (create_flags)
2720                 mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
2721                             create_flags);
2722
2723         return (create_flags) ? -EINVAL : 0;
2724 }
2725
2726 static int process_udata_size(struct mlx5_ib_dev *dev,
2727                               struct mlx5_create_qp_params *params)
2728 {
2729         size_t ucmd = sizeof(struct mlx5_ib_create_qp);
2730         struct ib_udata *udata = params->udata;
2731         size_t outlen = udata->outlen;
2732         size_t inlen = udata->inlen;
2733
2734         params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
2735         params->ucmd_size = ucmd;
2736         if (!params->is_rss_raw) {
2737                 /* User has old rdma-core, which doesn't support ECE */
2738                 size_t min_inlen =
2739                         offsetof(struct mlx5_ib_create_qp, ece_options);
2740
2741                 /*
2742                  * We will check in check_ucmd_data() that the user
2743                  * cleared everything after inlen.
2744                  */
2745                 params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
2746                 goto out;
2747         }
2748
2749         /* RSS RAW QP */
2750         if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
2751                 return -EINVAL;
2752
2753         if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
2754                 return -EINVAL;
2755
2756         ucmd = sizeof(struct mlx5_ib_create_qp_rss);
2757         params->ucmd_size = ucmd;
2758         if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
2759                 return -EINVAL;
2760
2761         params->inlen = min(ucmd, inlen);
2762 out:
2763         if (!params->inlen)
2764                 mlx5_ib_dbg(dev, "udata is too small\n");
2765
2766         return (params->inlen) ? 0 : -EINVAL;
2767 }
2768
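/*
 * Dispatch QP creation to the flow matching the QP type: RSS RAW packet
 * QPs, DCTs and XRC targets have dedicated paths, everything else is
 * split between the user and kernel creation flows.
 */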
2769 static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2770                      struct mlx5_ib_qp *qp,
2771                      struct mlx5_create_qp_params *params)
2772 {
2773         int err;
2774
2775         if (params->is_rss_raw) {
2776                 err = create_rss_raw_qp_tir(dev, pd, qp, params);
2777                 goto out;
2778         }
2779
2780         if (qp->type == MLX5_IB_QPT_DCT) {
2781                 err = create_dct(dev, pd, qp, params);
2782                 goto out;
2783         }
2784
2785         if (qp->type == IB_QPT_XRC_TGT) {
2786                 err = create_xrc_tgt_qp(dev, qp, params);
2787                 goto out;
2788         }
2789
2790         if (params->udata)
2791                 err = create_user_qp(dev, pd, qp, params);
2792         else
2793                 err = create_kernel_qp(dev, pd, qp, params);
2794
2795 out:
2796         if (err) {
2797                 mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
2798                 return err;
2799         }
2800
2801         if (is_qp0(qp->type))
2802                 qp->ibqp.qp_num = 0;
2803         else if (is_qp1(qp->type))
2804                 qp->ibqp.qp_num = 1;
2805         else
2806                 qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
2807
2808         mlx5_ib_dbg(dev,
2809                 "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
2810                 qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
2811                 params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
2812                                         -1,
2813                 params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
2814                                         -1,
2815                 params->resp.ece_options);
2816
2817         return 0;
2818 }
2819
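/*
 * Per-type sanity checks on the init attributes: a DCT needs both an
 * SRQ and a receive CQ, a DCI must not request receive resources, and
 * an RSS RAW packet QP cannot carry a send CQ.
 */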
2820 static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2821                          struct ib_qp_init_attr *attr)
2822 {
2823         int ret = 0;
2824
2825         switch (qp->type) {
2826         case MLX5_IB_QPT_DCT:
2827                 ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
2828                 break;
2829         case MLX5_IB_QPT_DCI:
2830                 ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
2831                               -EINVAL :
2832                               0;
2833                 break;
2834         case IB_QPT_RAW_PACKET:
2835                 ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
2836                 break;
2837         default:
2838                 break;
2839         }
2840
2841         if (ret)
2842                 mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
2843
2844         return ret;
2845 }
2846
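/*
 * Extract the user index from the user command; RSS RAW QPs don't
 * carry one, so the default uidx set by the caller is kept.
 */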
2847 static int get_qp_uidx(struct mlx5_ib_qp *qp,
2848                        struct mlx5_create_qp_params *params)
2849 {
2850         struct mlx5_ib_create_qp *ucmd = params->ucmd;
2851         struct ib_udata *udata = params->udata;
2852         struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2853                 udata, struct mlx5_ib_ucontext, ibucontext);
2854
2855         if (params->is_rss_raw)
2856                 return 0;
2857
2858         return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
2859 }
2860
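/*
 * A DCT exists in firmware only once it has been moved to RTR, so the
 * destroy command is issued only in that state; the saved create
 * mailbox is freed in any case.
 */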
2861 static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
2862 {
2863         struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
2864
2865         if (mqp->state == IB_QPS_RTR) {
2866                 int err;
2867
2868                 err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
2869                 if (err) {
2870                         mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
2871                         return err;
2872                 }
2873         }
2874
2875         kfree(mqp->dct.in);
2876         kfree(mqp);
2877         return 0;
2878 }
2879
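/*
 * Reject the create request if userspace passed input bytes beyond the
 * known command layout that are not zeroed.
 */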
2880 static int check_ucmd_data(struct mlx5_ib_dev *dev,
2881                            struct mlx5_create_qp_params *params)
2882 {
2883         struct ib_udata *udata = params->udata;
2884         size_t size, last;
2885         int ret;
2886
2887         if (params->is_rss_raw)
2888                 /*
2889                  * These QPs don't have a "reserved" field in their
2890                  * create_qp input struct, so their data is always valid.
2891                  */
2892                 last = sizeof(struct mlx5_ib_create_qp_rss);
2893         else
2894                 last = offsetof(struct mlx5_ib_create_qp, reserved);
2895
2896         if (udata->inlen <= last)
2897                 return 0;
2898
2899         /*
2900          * The user provides different create_qp structures based on
2901          * the flow, and we need to know whether the memory after our
2902          * struct create_qp ends was cleared.
2903          */
2904         size = udata->inlen - last;
2905         ret = ib_is_udata_cleared(params->udata, last, size);
2906         if (!ret)
2907                 mlx5_ib_dbg(
2908                         dev,
2909                         "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
2910                         udata->inlen, params->ucmd_size, last, size);
2911         return ret ? 0 : -EINVAL;
2912 }
2913
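/*
 * Main create_qp entry point: resolve the QP type, validate the flow
 * and the user command, allocate the mlx5_ib_qp, process vendor and
 * create flags, then build the QP and copy the response to userspace.
 */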
2914 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
2915                                 struct ib_udata *udata)
2916 {
2917         struct mlx5_create_qp_params params = {};
2918         struct mlx5_ib_dev *dev;
2919         struct mlx5_ib_qp *qp;
2920         enum ib_qp_type type;
2921         int err;
2922
2923         dev = pd ? to_mdev(pd->device) :
2924                    to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
2925
2926         err = check_qp_type(dev, attr, &type);
2927         if (err)
2928                 return ERR_PTR(err);
2929
2930         err = check_valid_flow(dev, pd, attr, udata);
2931         if (err)
2932                 return ERR_PTR(err);
2933
2934         if (attr->qp_type == IB_QPT_GSI)
2935                 return mlx5_ib_gsi_create_qp(pd, attr);
2936
2937         params.udata = udata;
2938         params.uidx = MLX5_IB_DEFAULT_UIDX;
2939         params.attr = attr;
2940         params.is_rss_raw = !!attr->rwq_ind_tbl;
2941
2942         if (udata) {
2943                 err = process_udata_size(dev, &params);
2944                 if (err)
2945                         return ERR_PTR(err);
2946
2947                 err = check_ucmd_data(dev, &params);
2948                 if (err)
2949                         return ERR_PTR(err);
2950
2951                 params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
2952                 if (!params.ucmd)
2953                         return ERR_PTR(-ENOMEM);
2954
2955                 err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
2956                 if (err)
2957                         goto free_ucmd;
2958         }
2959
2960         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2961         if (!qp) {
2962                 err = -ENOMEM;
2963                 goto free_ucmd;
2964         }
2965
2966         qp->type = type;
2967         if (udata) {
2968                 err = process_vendor_flags(dev, qp, params.ucmd, attr);
2969                 if (err)
2970                         goto free_qp;
2971
2972                 err = get_qp_uidx(qp, &params);
2973                 if (err)
2974                         goto free_qp;
2975         }
2976         err = process_create_flags(dev, qp, attr);
2977         if (err)
2978                 goto free_qp;
2979
2980         err = check_qp_attr(dev, qp, attr);
2981         if (err)
2982                 goto free_qp;
2983
2984         err = create_qp(dev, pd, qp, &params);
2985         if (err)
2986                 goto free_qp;
2987
2988         kfree(params.ucmd);
2989         params.ucmd = NULL;
2990
2991         if (udata)
2992                 /*
2993                  * It is safe to copy the response for all user create QP flows,
2994                  * including MLX5_IB_QPT_DCT, which doesn't need it.
2995                  * In that case, resp will be filled with zeros.
2996                  */
2997                 err = ib_copy_to_udata(udata, &params.resp, params.outlen);
2998         if (err)
2999                 goto destroy_qp;
3000
3001         return &qp->ibqp;
3002
3003 destroy_qp:
3004         if (qp->type == MLX5_IB_QPT_DCT) {
3005                 mlx5_ib_destroy_dct(qp);
3006         } else {
3007                 /*
3008                  * The lines below are a temporary solution until QP
3009                  * allocation is moved under IB/core responsibility.
3010                  */
3011                 qp->ibqp.send_cq = attr->send_cq;
3012                 qp->ibqp.recv_cq = attr->recv_cq;
3013                 qp->ibqp.pd = pd;
3014                 destroy_qp_common(dev, qp, udata);
3015         }
3016
3017         qp = NULL;
3018 free_qp:
3019         kfree(qp);
3020 free_ucmd:
3021         kfree(params.ucmd);
3022         return ERR_PTR(err);
3023 }
3024
3025 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
3026 {
3027         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3028         struct mlx5_ib_qp *mqp = to_mqp(qp);
3029
3030         if (unlikely(qp->qp_type == IB_QPT_GSI))
3031                 return mlx5_ib_gsi_destroy_qp(qp);
3032
3033         if (mqp->type == MLX5_IB_QPT_DCT)
3034                 return mlx5_ib_destroy_dct(mqp);
3035
3036         destroy_qp_common(dev, mqp, udata);
3037
3038         kfree(mqp);
3039
3040         return 0;
3041 }
3042
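/*
 * Fold the remote access flags into the QPC.  When the responder depth
 * (max_dest_rd_atomic) is zero, remote read and atomic access are
 * masked out, since the QP cannot serve them anyway.
 */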
3043 static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
3044                                 const struct ib_qp_attr *attr, int attr_mask,
3045                                 void *qpc)
3046 {
3047         struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
3048         u8 dest_rd_atomic;
3049         u32 access_flags;
3050
3051         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3052                 dest_rd_atomic = attr->max_dest_rd_atomic;
3053         else
3054                 dest_rd_atomic = qp->trans_qp.resp_depth;
3055
3056         if (attr_mask & IB_QP_ACCESS_FLAGS)
3057                 access_flags = attr->qp_access_flags;
3058         else
3059                 access_flags = qp->trans_qp.atomic_rd_en;
3060
3061         if (!dest_rd_atomic)
3062                 access_flags &= IB_ACCESS_REMOTE_WRITE;
3063
3064         MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
3065
3066         if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
3067                 int atomic_mode;
3068
3069                 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
3070                 if (atomic_mode < 0)
3071                         return -EOPNOTSUPP;
3072
3073                 MLX5_SET(qpc, qpc, rae, 1);
3074                 MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
3075         }
3076
3077         MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3078         return 0;
3079 }
3080
3081 enum {
3082         MLX5_PATH_FLAG_FL       = 1 << 0,
3083         MLX5_PATH_FLAG_FREE_AR  = 1 << 1,
3084         MLX5_PATH_FLAG_COUNTER  = 1 << 2,
3085 };
3086
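/*
 * Translate an IB static rate into the mlx5 stat_rate encoding, falling
 * back to the nearest lower rate the device supports.
 * IB_RATE_PORT_CURRENT maps to 0, i.e. no static rate on the path.
 */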
3087 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
3088 {
3089         if (rate == IB_RATE_PORT_CURRENT)
3090                 return 0;
3091
3092         if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
3093                 return -EINVAL;
3094
3095         while (rate != IB_RATE_PORT_CURRENT &&
3096                !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
3097                  MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
3098                 --rate;
3099
3100         return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
3101 }
3102
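/*
 * The send side of a RAW packet QP lives in a TIS, so the ethernet
 * priority derived from the SL is applied with a MODIFY_TIS command
 * rather than through the QPC.
 */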
3103 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
3104                                       struct mlx5_ib_sq *sq, u8 sl,
3105                                       struct ib_pd *pd)
3106 {
3107         void *in;
3108         void *tisc;
3109         int inlen;
3110         int err;
3111
3112         inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
3113         in = kvzalloc(inlen, GFP_KERNEL);
3114         if (!in)
3115                 return -ENOMEM;
3116
3117         MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
3118         MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
3119
3120         tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
3121         MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
3122
3123         err = mlx5_core_modify_tis(dev, sq->tisn, in);
3124
3125         kvfree(in);
3126
3127         return err;
3128 }
3129
3130 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
3131                                          struct mlx5_ib_sq *sq, u8 tx_affinity,
3132                                          struct ib_pd *pd)
3133 {
3134         void *in;
3135         void *tisc;
3136         int inlen;
3137         int err;
3138
3139         inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
3140         in = kvzalloc(inlen, GFP_KERNEL);
3141         if (!in)
3142                 return -ENOMEM;
3143
3144         MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
3145         MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
3146
3147         tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
3148         MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
3149
3150         err = mlx5_core_modify_tis(dev, sq->tisn, in);
3151
3152         kvfree(in);
3153
3154         return err;
3155 }
3156
3157 static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
3158                                     u32 lqpn, u32 rqpn)
3159 {
3161         u32 fl = ah->grh.flow_label;
3162
3163         if (!fl)
3164                 fl = rdma_calc_flow_label(lqpn, rqpn);
3165
3166         MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
3167 }
3168
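/*
 * Build an address path (ads) from an rdma_ah_attr, covering both RoCE
 * (dmac, UDP source port, DSCP) and native IB (LID, SL) address
 * vectors, plus the GRH fields common to both.
 */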
3169 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3170                          const struct rdma_ah_attr *ah, void *path, u8 port,
3171                          int attr_mask, u32 path_flags,
3172                          const struct ib_qp_attr *attr, bool alt)
3173 {
3174         const struct ib_global_route *grh = rdma_ah_read_grh(ah);
3175         int err;
3176         enum ib_gid_type gid_type;
3177         u8 ah_flags = rdma_ah_get_ah_flags(ah);
3178         u8 sl = rdma_ah_get_sl(ah);
3179
3180         if (attr_mask & IB_QP_PKEY_INDEX)
3181                 MLX5_SET(ads, path, pkey_index,
3182                          alt ? attr->alt_pkey_index : attr->pkey_index);
3183
3184         if (ah_flags & IB_AH_GRH) {
3185                 if (grh->sgid_index >=
3186                     dev->mdev->port_caps[port - 1].gid_table_len) {
3187                         pr_err("sgid_index (%u) too large. max is %d\n",
3188                                grh->sgid_index,
3189                                dev->mdev->port_caps[port - 1].gid_table_len);
3190                         return -EINVAL;
3191                 }
3192         }
3193
3194         if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
3195                 if (!(ah_flags & IB_AH_GRH))
3196                         return -EINVAL;
3197
3198                 ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
3199                                 ah->roce.dmac);
3200                 if ((qp->ibqp.qp_type == IB_QPT_RC ||
3201                      qp->ibqp.qp_type == IB_QPT_UC ||
3202                      qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3203                      qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
3204                     (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
3205                     (attr_mask & IB_QP_DEST_QPN))
3206                         mlx5_set_path_udp_sport(path, ah,
3207                                                 qp->ibqp.qp_num,
3208                                                 attr->dest_qp_num);
3209                 MLX5_SET(ads, path, eth_prio, sl & 0x7);
3210                 gid_type = ah->grh.sgid_attr->gid_type;
3211                 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
3212                         MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
3213         } else {
3214                 MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
3215                 MLX5_SET(ads, path, free_ar,
3216                          !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
3217                 MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
3218                 MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
3219                 MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
3220                 MLX5_SET(ads, path, sl, sl);
3221         }
3222
3223         if (ah_flags & IB_AH_GRH) {
3224                 MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
3225                 MLX5_SET(ads, path, hop_limit, grh->hop_limit);
3226                 MLX5_SET(ads, path, tclass, grh->traffic_class);
3227                 MLX5_SET(ads, path, flow_label, grh->flow_label);
3228                 memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
3229                        sizeof(grh->dgid.raw));
3230         }
3231
3232         err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
3233         if (err < 0)
3234                 return err;
3235         MLX5_SET(ads, path, stat_rate, err);
3236         MLX5_SET(ads, path, vhca_port_num, port);
3237
3238         if (attr_mask & IB_QP_TIMEOUT)
3239                 MLX5_SET(ads, path, ack_timeout,
3240                          alt ? attr->alt_timeout : attr->timeout);
3241
3242         if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
3243                 return modify_raw_packet_eth_prio(dev->mdev,
3244                                                   &qp->raw_packet_qp.sq,
3245                                                   sl & 0xf, qp->ibqp.pd);
3246
3247         return 0;
3248 }
3249
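/*
 * For each (current state, new state, service type) transition, this
 * table lists the QPC fields that firmware allows to be modified; the
 * computed optpar mask is ANDed with the matching entry before the
 * modify command is issued.
 */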
3250 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
3251         [MLX5_QP_STATE_INIT] = {
3252                 [MLX5_QP_STATE_INIT] = {
3253                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
3254                                           MLX5_QP_OPTPAR_RAE            |
3255                                           MLX5_QP_OPTPAR_RWE            |
3256                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3257                                           MLX5_QP_OPTPAR_PRI_PORT       |
3258                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3259                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
3260                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3261                                           MLX5_QP_OPTPAR_PRI_PORT       |
3262                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3263                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
3264                                           MLX5_QP_OPTPAR_Q_KEY          |
3265                                           MLX5_QP_OPTPAR_PRI_PORT,
3266                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE           |
3267                                           MLX5_QP_OPTPAR_RAE            |
3268                                           MLX5_QP_OPTPAR_RWE            |
3269                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3270                                           MLX5_QP_OPTPAR_PRI_PORT       |
3271                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3272                 },
3273                 [MLX5_QP_STATE_RTR] = {
3274                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3275                                           MLX5_QP_OPTPAR_RRE            |
3276                                           MLX5_QP_OPTPAR_RAE            |
3277                                           MLX5_QP_OPTPAR_RWE            |
3278                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3279                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3280                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3281                                           MLX5_QP_OPTPAR_RWE            |
3282                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3283                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3284                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
3285                                           MLX5_QP_OPTPAR_Q_KEY,
3286                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
3287                                            MLX5_QP_OPTPAR_Q_KEY,
3288                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
3289                                           MLX5_QP_OPTPAR_RRE            |
3290                                           MLX5_QP_OPTPAR_RAE            |
3291                                           MLX5_QP_OPTPAR_RWE            |
3292                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
3293                                           MLX5_QP_OPTPAR_LAG_TX_AFF,
3294                 },
3295         },
3296         [MLX5_QP_STATE_RTR] = {
3297                 [MLX5_QP_STATE_RTS] = {
3298                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3299                                           MLX5_QP_OPTPAR_RRE            |
3300                                           MLX5_QP_OPTPAR_RAE            |
3301                                           MLX5_QP_OPTPAR_RWE            |
3302                                           MLX5_QP_OPTPAR_PM_STATE       |
3303                                           MLX5_QP_OPTPAR_RNR_TIMEOUT,
3304                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3305                                           MLX5_QP_OPTPAR_RWE            |
3306                                           MLX5_QP_OPTPAR_PM_STATE,
3307                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
3308                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
3309                                           MLX5_QP_OPTPAR_RRE            |
3310                                           MLX5_QP_OPTPAR_RAE            |
3311                                           MLX5_QP_OPTPAR_RWE            |
3312                                           MLX5_QP_OPTPAR_PM_STATE       |
3313                                           MLX5_QP_OPTPAR_RNR_TIMEOUT,
3314                 },
3315         },
3316         [MLX5_QP_STATE_RTS] = {
3317                 [MLX5_QP_STATE_RTS] = {
3318                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
3319                                           MLX5_QP_OPTPAR_RAE            |
3320                                           MLX5_QP_OPTPAR_RWE            |
3321                                           MLX5_QP_OPTPAR_RNR_TIMEOUT    |
3322                                           MLX5_QP_OPTPAR_PM_STATE       |
3323                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3324                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
3325                                           MLX5_QP_OPTPAR_PM_STATE       |
3326                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3327                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY          |
3328                                           MLX5_QP_OPTPAR_SRQN           |
3329                                           MLX5_QP_OPTPAR_CQN_RCV,
3330                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE           |
3331                                           MLX5_QP_OPTPAR_RAE            |
3332                                           MLX5_QP_OPTPAR_RWE            |
3333                                           MLX5_QP_OPTPAR_RNR_TIMEOUT    |
3334                                           MLX5_QP_OPTPAR_PM_STATE       |
3335                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3336                 },
3337         },
3338         [MLX5_QP_STATE_SQER] = {
3339                 [MLX5_QP_STATE_RTS] = {
3340                         [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
3341                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
3342                         [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
3343                         [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
3344                                            MLX5_QP_OPTPAR_RWE           |
3345                                            MLX5_QP_OPTPAR_RAE           |
3346                                            MLX5_QP_OPTPAR_RRE,
3347                         [MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT  |
3348                                            MLX5_QP_OPTPAR_RWE           |
3349                                            MLX5_QP_OPTPAR_RAE           |
3350                                            MLX5_QP_OPTPAR_RRE,
3351                 },
3352         },
3353 };
3354
3355 static int ib_nr_to_mlx5_nr(int ib_mask)
3356 {
3357         switch (ib_mask) {
3358         case IB_QP_STATE:
3359                 return 0;
3360         case IB_QP_CUR_STATE:
3361                 return 0;
3362         case IB_QP_EN_SQD_ASYNC_NOTIFY:
3363                 return 0;
3364         case IB_QP_ACCESS_FLAGS:
3365                 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
3366                         MLX5_QP_OPTPAR_RAE;
3367         case IB_QP_PKEY_INDEX:
3368                 return MLX5_QP_OPTPAR_PKEY_INDEX;
3369         case IB_QP_PORT:
3370                 return MLX5_QP_OPTPAR_PRI_PORT;
3371         case IB_QP_QKEY:
3372                 return MLX5_QP_OPTPAR_Q_KEY;
3373         case IB_QP_AV:
3374                 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
3375                         MLX5_QP_OPTPAR_PRI_PORT;
3376         case IB_QP_PATH_MTU:
3377                 return 0;
3378         case IB_QP_TIMEOUT:
3379                 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
3380         case IB_QP_RETRY_CNT:
3381                 return MLX5_QP_OPTPAR_RETRY_COUNT;
3382         case IB_QP_RNR_RETRY:
3383                 return MLX5_QP_OPTPAR_RNR_RETRY;
3384         case IB_QP_RQ_PSN:
3385                 return 0;
3386         case IB_QP_MAX_QP_RD_ATOMIC:
3387                 return MLX5_QP_OPTPAR_SRA_MAX;
3388         case IB_QP_ALT_PATH:
3389                 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
3390         case IB_QP_MIN_RNR_TIMER:
3391                 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
3392         case IB_QP_SQ_PSN:
3393                 return 0;
3394         case IB_QP_MAX_DEST_RD_ATOMIC:
3395                 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
3396                         MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
3397         case IB_QP_PATH_MIG_STATE:
3398                 return MLX5_QP_OPTPAR_PM_STATE;
3399         case IB_QP_CAP:
3400                 return 0;
3401         case IB_QP_DEST_QPN:
3402                 return 0;
3403         }
3404         return 0;
3405 }
3406
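/*
 * Expand an ib_qp_attr_mask into the matching set of mlx5 optional
 * parameter bits by translating each mask bit individually.
 */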
3407 static int ib_mask_to_mlx5_opt(int ib_mask)
3408 {
3409         int result = 0;
3410         int i;
3411
3412         for (i = 0; i < 8 * sizeof(int); i++) {
3413                 if ((1 << i) & ib_mask)
3414                         result |= ib_nr_to_mlx5_nr(1 << i);
3415         }
3416
3417         return result;
3418 }
3419
3420 static int modify_raw_packet_qp_rq(
3421         struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
3422         const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3423 {
3424         void *in;
3425         void *rqc;
3426         int inlen;
3427         int err;
3428
3429         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
3430         in = kvzalloc(inlen, GFP_KERNEL);
3431         if (!in)
3432                 return -ENOMEM;
3433
3434         MLX5_SET(modify_rq_in, in, rq_state, rq->state);
3435         MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
3436
3437         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
3438         MLX5_SET(rqc, rqc, state, new_state);
3439
3440         if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
3441                 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
3442                         MLX5_SET64(modify_rq_in, in, modify_bitmask,
3443                                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
3444                         MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
3445                 } else {
3446                         dev_info_once(&dev->ib_dev.dev,
3447                                       "RAW PACKET QP counters are not supported on current FW\n");
3448                 }
3449         }
3450
3451         err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
3452         if (err)
3453                 goto out;
3454
3455         rq->state = new_state;
3456
3457 out:
3458         kvfree(in);
3459         return err;
3460 }
3461
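/*
 * Move the SQ of a RAW packet QP to a new state and, when requested,
 * update its packet pacing rate limit.  A new rate is registered in the
 * rate table before the MODIFY_SQ command, and the old rate is released
 * only after the new one has taken effect.
 */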
3462 static int modify_raw_packet_qp_sq(
3463         struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
3464         const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3465 {
3466         struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
3467         struct mlx5_rate_limit old_rl = ibqp->rl;
3468         struct mlx5_rate_limit new_rl = old_rl;
3469         bool new_rate_added = false;
3470         u16 rl_index = 0;
3471         void *in;
3472         void *sqc;
3473         int inlen;
3474         int err;
3475
3476         inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
3477         in = kvzalloc(inlen, GFP_KERNEL);
3478         if (!in)
3479                 return -ENOMEM;
3480
3481         MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
3482         MLX5_SET(modify_sq_in, in, sq_state, sq->state);
3483
3484         sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
3485         MLX5_SET(sqc, sqc, state, new_state);
3486
3487         if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
3488                 if (new_state != MLX5_SQC_STATE_RDY)
3489                         pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3490                                 __func__);
3491                 else
3492                         new_rl = raw_qp_param->rl;
3493         }
3494
3495         if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
3496                 if (new_rl.rate) {
3497                         err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
3498                         if (err) {
3499                                 pr_err("Failed configuring rate limit (err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3500                                        err, new_rl.rate,
3501                                        new_rl.max_burst_sz,
3502                                        new_rl.typical_pkt_sz);
3503
3504                                 goto out;
3505                         }
3506                         new_rate_added = true;
3507                 }
3508
3509                 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
3510                 /* index 0 means no limit */
3511                 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
3512         }
3513
3514         err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
3515         if (err) {
3516                 /* Remove new rate from table if failed */
3517                 if (new_rate_added)
3518                         mlx5_rl_remove_rate(dev, &new_rl);
3519                 goto out;
3520         }
3521
3522         /* Only remove the old rate after new rate was set */
3523         if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
3524             (new_state != MLX5_SQC_STATE_RDY)) {
3525                 mlx5_rl_remove_rate(dev, &old_rl);
3526                 if (new_state != MLX5_SQC_STATE_RDY)
3527                         memset(&new_rl, 0, sizeof(new_rl));
3528         }
3529
3530         ibqp->rl = new_rl;
3531         sq->state = new_state;
3532
3533 out:
3534         kvfree(in);
3535         return err;
3536 }
3537
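/*
 * Apply a modify operation to a RAW packet QP by driving its RQ and SQ
 * state machines separately; tx affinity and the vport flow rule are
 * handled on the SQ side.
 */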
3538 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3539                                 const struct mlx5_modify_raw_qp_param *raw_qp_param,
3540                                 u8 tx_affinity)
3541 {
3542         struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3543         struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
3544         struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
3545         int modify_rq = !!qp->rq.wqe_cnt;
3546         int modify_sq = !!qp->sq.wqe_cnt;
3547         int rq_state;
3548         int sq_state;
3549         int err;
3550
3551         switch (raw_qp_param->operation) {
3552         case MLX5_CMD_OP_RST2INIT_QP:
3553                 rq_state = MLX5_RQC_STATE_RDY;
3554                 sq_state = MLX5_SQC_STATE_RDY;
3555                 break;
3556         case MLX5_CMD_OP_2ERR_QP:
3557                 rq_state = MLX5_RQC_STATE_ERR;
3558                 sq_state = MLX5_SQC_STATE_ERR;
3559                 break;
3560         case MLX5_CMD_OP_2RST_QP:
3561                 rq_state = MLX5_RQC_STATE_RST;
3562                 sq_state = MLX5_SQC_STATE_RST;
3563                 break;
3564         case MLX5_CMD_OP_RTR2RTS_QP:
3565         case MLX5_CMD_OP_RTS2RTS_QP:
3566                 if (raw_qp_param->set_mask ==
3567                     MLX5_RAW_QP_RATE_LIMIT) {
3568                         modify_rq = 0;
3569                         sq_state = sq->state;
3570                 } else {
3571                         return raw_qp_param->set_mask ? -EINVAL : 0;
3572                 }
3573                 break;
3574         case MLX5_CMD_OP_INIT2INIT_QP:
3575         case MLX5_CMD_OP_INIT2RTR_QP:
3576                 if (raw_qp_param->set_mask)
3577                         return -EINVAL;
3578                 else
3579                         return 0;
3580         default:
3581                 WARN_ON(1);
3582                 return -EINVAL;
3583         }
3584
3585         if (modify_rq) {
3586                 err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
3587                                                qp->ibqp.pd);
3588                 if (err)
3589                         return err;
3590         }
3591
3592         if (modify_sq) {
3593                 struct mlx5_flow_handle *flow_rule;
3594
3595                 if (tx_affinity) {
3596                         err = modify_raw_packet_tx_affinity(dev->mdev, sq,
3597                                                             tx_affinity,
3598                                                             qp->ibqp.pd);
3599                         if (err)
3600                                 return err;
3601                 }
3602
3603                 flow_rule = create_flow_rule_vport_sq(dev, sq,
3604                                                       raw_qp_param->port);
3605                 if (IS_ERR(flow_rule))
3606                         return PTR_ERR(flow_rule);
3607
3608                 err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
3609                                               raw_qp_param, qp->ibqp.pd);
3610                 if (err) {
3611                         if (flow_rule)
3612                                 mlx5_del_flow_rules(flow_rule);
3613                         return err;
3614                 }
3615
3616                 if (flow_rule) {
3617                         destroy_flow_rule_vport_sq(sq);
3618                         sq->flow_rule = flow_rule;
3619                 }
3620
3621                 return err;
3622         }
3623
3624         return 0;
3625 }
3626
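/*
 * Round-robin LAG tx port selection: user QPs share a counter per
 * ucontext, kernel QPs use the per-port device counter.
 */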
3627 static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
3628                                        struct ib_udata *udata)
3629 {
3630         struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3631                 udata, struct mlx5_ib_ucontext, ibucontext);
3632         u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3633         atomic_t *tx_port_affinity;
3634
3635         if (ucontext)
3636                 tx_port_affinity = &ucontext->tx_port_affinity;
3637         else
3638                 tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
3639
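        /*
         * '%' binds tighter than '+', so this computes
         * (n % MLX5_MAX_PORTS) + 1: port numbers in the
         * range 1..MLX5_MAX_PORTS.
         */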
3640         return (unsigned int)atomic_add_return(1, tx_port_affinity) %
3641                 MLX5_MAX_PORTS + 1;
3642 }
3643
3644 static bool qp_supports_affinity(struct ib_qp *qp)
3645 {
3646         if ((qp->qp_type == IB_QPT_RC) ||
3647             (qp->qp_type == IB_QPT_UD) ||
3648             (qp->qp_type == IB_QPT_UC) ||
3649             (qp->qp_type == IB_QPT_RAW_PACKET) ||
3650             (qp->qp_type == IB_QPT_XRC_INI) ||
3651             (qp->qp_type == IB_QPT_XRC_TGT))
3652                 return true;
3653         return false;
3654 }
3655
3656 static unsigned int get_tx_affinity(struct ib_qp *qp,
3657                                     const struct ib_qp_attr *attr,
3658                                     int attr_mask, u8 init,
3659                                     struct ib_udata *udata)
3660 {
3661         struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3662                 udata, struct mlx5_ib_ucontext, ibucontext);
3663         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3664         struct mlx5_ib_qp *mqp = to_mqp(qp);
3665         struct mlx5_ib_qp_base *qp_base;
3666         unsigned int tx_affinity;
3667
3668         if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
3669               qp_supports_affinity(qp)))
3670                 return 0;
3671
3672         if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
3673                 tx_affinity = mqp->gsi_lag_port;
3674         else if (init)
3675                 tx_affinity = get_tx_affinity_rr(dev, udata);
3676         else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
3677                 tx_affinity =
3678                         mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
3679         else
3680                 return 0;
3681
3682         qp_base = &mqp->trans_qp.base;
3683         if (ucontext)
3684                 mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
3685                             tx_affinity, qp_base->mqp.qpn, ucontext);
3686         else
3687                 mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
3688                             tx_affinity, qp_base->mqp.qpn);
3689         return tx_affinity;
3690 }
3691
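/*
 * Bind a QP to a counter set with an RTS2RTS command that modifies only
 * counter_set_id (selected via opt_param_mask).
 */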
3692 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
3693                                     struct rdma_counter *counter)
3694 {
3695         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3696         u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
3697         struct mlx5_ib_qp *mqp = to_mqp(qp);
3698         struct mlx5_ib_qp_base *base;
3699         u32 set_id;
3700         u32 *qpc;
3701
3702         if (counter)
3703                 set_id = counter->id;
3704         else
3705                 set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
3706
3707         base = &mqp->trans_qp.base;
3708         MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
3709         MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
3710         MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
3711         MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
3712                  MLX5_QP_OPTPAR_COUNTER_SET_ID);
3713
3714         qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
3715         MLX5_SET(qpc, qpc, counter_set_id, set_id);
3716         return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
3717 }
3718
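/*
 * Core modify-QP implementation: the optab table maps a (current, new)
 * state pair to the firmware command, the QPC is filled from the
 * attribute mask, and the change is executed either through the RAW
 * packet path (TIS/SQ/RQ objects) or the regular MODIFY_QP command.
 */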
3719 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
3720                                const struct ib_qp_attr *attr, int attr_mask,
3721                                enum ib_qp_state cur_state,
3722                                enum ib_qp_state new_state,
3723                                const struct mlx5_ib_modify_qp *ucmd,
3724                                struct mlx5_ib_modify_qp_resp *resp,
3725                                struct ib_udata *udata)
3726 {
3727         static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
3728                 [MLX5_QP_STATE_RST] = {
3729                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3730                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3731                         [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_RST2INIT_QP,
3732                 },
3733                 [MLX5_QP_STATE_INIT]  = {
3734                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3735                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3736                         [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_INIT2INIT_QP,
3737                         [MLX5_QP_STATE_RTR]     = MLX5_CMD_OP_INIT2RTR_QP,
3738                 },
3739                 [MLX5_QP_STATE_RTR]   = {
3740                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3741                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3742                         [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTR2RTS_QP,
3743                 },
3744                 [MLX5_QP_STATE_RTS]   = {
3745                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3746                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3747                         [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTS2RTS_QP,
3748                 },
3749                 [MLX5_QP_STATE_SQD] = {
3750                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3751                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3752                 },
3753                 [MLX5_QP_STATE_SQER] = {
3754                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3755                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3756                         [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_SQERR2RTS_QP,
3757                 },
3758                 [MLX5_QP_STATE_ERR] = {
3759                         [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
3760                         [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
3761                 }
3762         };
3763
3764         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3765         struct mlx5_ib_qp *qp = to_mqp(ibqp);
3766         struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
3767         struct mlx5_ib_cq *send_cq, *recv_cq;
3768         struct mlx5_ib_pd *pd;
3769         enum mlx5_qp_state mlx5_cur, mlx5_new;
3770         void *qpc, *pri_path, *alt_path;
3771         enum mlx5_qp_optpar optpar = 0;
3772         u32 set_id = 0;
3773         int mlx5_st;
3774         int err;
3775         u16 op;
3776         u8 tx_affinity = 0;
3777
3778         mlx5_st = to_mlx5_st(qp->type);
3779         if (mlx5_st < 0)
3780                 return -EINVAL;
3781
3782         qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
3783         if (!qpc)
3784                 return -ENOMEM;
3785
3786         pd = to_mpd(qp->ibqp.pd);
3787         MLX5_SET(qpc, qpc, st, mlx5_st);
3788
3789         if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
3790                 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
3791         } else {
3792                 switch (attr->path_mig_state) {
3793                 case IB_MIG_MIGRATED:
3794                         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
3795                         break;
3796                 case IB_MIG_REARM:
3797                         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
3798                         break;
3799                 case IB_MIG_ARMED:
3800                         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
3801                         break;
3802                 }
3803         }
3804
3805         tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
3806                                       cur_state == IB_QPS_RESET &&
3807                                       new_state == IB_QPS_INIT, udata);
3808
3809         MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
3810         if (tx_affinity && new_state == IB_QPS_RTR &&
3811             MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
3812                 optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
3813
3814         if (is_sqp(ibqp->qp_type)) {
3815                 MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
3816                 MLX5_SET(qpc, qpc, log_msg_max, 8);
3817         } else if ((ibqp->qp_type == IB_QPT_UD &&
3818                     !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
3819                    ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
3820                 MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
3821                 MLX5_SET(qpc, qpc, log_msg_max, 12);
3822         } else if (attr_mask & IB_QP_PATH_MTU) {
3823                 if (attr->path_mtu < IB_MTU_256 ||
3824                     attr->path_mtu > IB_MTU_4096) {
3825                         mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
3826                         err = -EINVAL;
3827                         goto out;
3828                 }
3829                 MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
3830                 MLX5_SET(qpc, qpc, log_msg_max,
3831                          MLX5_CAP_GEN(dev->mdev, log_max_msg));
3832         }
3833
3834         if (attr_mask & IB_QP_DEST_QPN)
3835                 MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
3836
3837         pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
3838         alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
3839
3840         if (attr_mask & IB_QP_PKEY_INDEX)
3841                 MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
3842
3843         /* TODO: implement counter_index functionality */
3844
3845         if (is_sqp(ibqp->qp_type))
3846                 MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
3847
3848         if (attr_mask & IB_QP_PORT)
3849                 MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
3850
3851         if (attr_mask & IB_QP_AV) {
3852                 err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
3853                                     attr_mask & IB_QP_PORT ? attr->port_num :
3854                                                              qp->port,
3855                                     attr_mask, 0, attr, false);
3856                 if (err)
3857                         goto out;
3858         }
3859
3860         if (attr_mask & IB_QP_TIMEOUT)
3861                 MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
3862
3863         if (attr_mask & IB_QP_ALT_PATH) {
3864                 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
3865                                     attr->alt_port_num,
3866                                     attr_mask | IB_QP_PKEY_INDEX |
3867                                             IB_QP_TIMEOUT,
3868                                     0, attr, true);
3869                 if (err)
3870                         goto out;
3871         }
3872
3873         get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
3874                 &send_cq, &recv_cq);
3875
3876         MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
3877         if (send_cq)
3878                 MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
3879         if (recv_cq)
3880                 MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
3881
3882         MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
3883
3884         if (attr_mask & IB_QP_RNR_RETRY)
3885                 MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
3886
3887         if (attr_mask & IB_QP_RETRY_CNT)
3888                 MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
3889
3890         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
3891                 MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
3892
3893         if (attr_mask & IB_QP_SQ_PSN)
3894                 MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
3895
3896         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
3897                 MLX5_SET(qpc, qpc, log_rra_max,
3898                          ilog2(attr->max_dest_rd_atomic));
3899
3900         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
3901                 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
3902                 if (err)
3903                         goto out;
3904         }
3905
3906         if (attr_mask & IB_QP_MIN_RNR_TIMER)
3907                 MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
3908
3909         if (attr_mask & IB_QP_RQ_PSN)
3910                 MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
3911
3912         if (attr_mask & IB_QP_QKEY)
3913                 MLX5_SET(qpc, qpc, q_key, attr->qkey);
3914
3915         if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3916                 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
3917
3918         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3919                 u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
3920                                qp->port) - 1;
3921
3922                 /* Underlay port should be used - index 0 function per port */
3923                 if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
3924                         port_num = 0;
3925
3926                 if (ibqp->counter)
3927                         set_id = ibqp->counter->id;
3928                 else
3929                         set_id = mlx5_ib_get_counters_id(dev, port_num);
3930                 MLX5_SET(qpc, qpc, counter_set_id, set_id);
3931         }
3932
3933         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3934                 MLX5_SET(qpc, qpc, rlky, 1);
3935
3936         if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
3937                 MLX5_SET(qpc, qpc, deth_sqpn, 1);
3938
3939         mlx5_cur = to_mlx5_state(cur_state);
3940         mlx5_new = to_mlx5_state(new_state);
3941
3942         if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
3943             !optab[mlx5_cur][mlx5_new]) {
3944                 err = -EINVAL;
3945                 goto out;
3946         }
3947
3948         op = optab[mlx5_cur][mlx5_new];
3949         optpar |= ib_mask_to_mlx5_opt(attr_mask);
3950         optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
3951
3952         if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
3953             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
3954                 struct mlx5_modify_raw_qp_param raw_qp_param = {};
3955
3956                 raw_qp_param.operation = op;
3957                 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3958                         raw_qp_param.rq_q_ctr_id = set_id;
3959                         raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
3960                 }
3961
3962                 if (attr_mask & IB_QP_PORT)
3963                         raw_qp_param.port = attr->port_num;
3964
3965                 if (attr_mask & IB_QP_RATE_LIMIT) {
3966                         raw_qp_param.rl.rate = attr->rate_limit;
3967
3968                         if (ucmd->burst_info.max_burst_sz) {
3969                                 if (attr->rate_limit &&
3970                                     MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
3971                                         raw_qp_param.rl.max_burst_sz =
3972                                                 ucmd->burst_info.max_burst_sz;
3973                                 } else {
3974                                         err = -EINVAL;
3975                                         goto out;
3976                                 }
3977                         }
3978
3979                         if (ucmd->burst_info.typical_pkt_sz) {
3980                                 if (attr->rate_limit &&
3981                                     MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
3982                                         raw_qp_param.rl.typical_pkt_sz =
3983                                                 ucmd->burst_info.typical_pkt_sz;
3984                                 } else {
3985                                         err = -EINVAL;
3986                                         goto out;
3987                                 }
3988                         }
3989
3990                         raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
3991                 }
3992
3993                 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
3994         } else {
3995                 if (udata) {
3996                         /* For the kernel flows, the resp will stay zero */
3997                         resp->ece_options =
3998                                 MLX5_CAP_GEN(dev->mdev, ece_support) ?
3999                                         ucmd->ece_options : 0;
4000                         resp->response_length = sizeof(*resp);
4001                 }
4002                 err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
4003                                           &resp->ece_options);
4004         }
4005
4006         if (err)
4007                 goto out;
4008
4009         qp->state = new_state;
4010
4011         if (attr_mask & IB_QP_ACCESS_FLAGS)
4012                 qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
4013         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4014                 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
4015         if (attr_mask & IB_QP_PORT)
4016                 qp->port = attr->port_num;
4017         if (attr_mask & IB_QP_ALT_PATH)
4018                 qp->trans_qp.alt_port = attr->alt_port_num;
4019
4020         /*
4021          * If we moved a kernel QP to RESET, clean up all old CQ
4022          * entries and reinitialize the QP.
4023          */
4024         if (new_state == IB_QPS_RESET &&
4025             !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
4026                 mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
4027                                  ibqp->srq ? to_msrq(ibqp->srq) : NULL);
4028                 if (send_cq != recv_cq)
4029                         mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
4030
4031                 qp->rq.head = 0;
4032                 qp->rq.tail = 0;
4033                 qp->sq.head = 0;
4034                 qp->sq.tail = 0;
4035                 qp->sq.cur_post = 0;
4036                 if (qp->sq.wqe_cnt)
4037                         qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
4038                 qp->sq.last_poll = 0;
4039                 qp->db.db[MLX5_RCV_DBR] = 0;
4040                 qp->db.db[MLX5_SND_DBR] = 0;
4041         }
4042
4043         if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
4044                 err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
4045                 if (!err)
4046                         qp->counter_pending = 0;
4047         }
4048
4049 out:
4050         kfree(qpc);
4051         return err;
4052 }
4053
4054 static inline bool is_valid_mask(int mask, int req, int opt)
4055 {
4056         if ((mask & req) != req)
4057                 return false;
4058
4059         if (mask & ~(req | opt))
4060                 return false;
4061
4062         return true;
4063 }
4064
4065 /* Check valid transitions for driver QP types.
4066  * For now the only QP type that this function supports is DCI.
4067  */
4068 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
4069                                 enum ib_qp_attr_mask attr_mask)
4070 {
4071         int req = IB_QP_STATE;
4072         int opt = 0;
4073
4074         if (new_state == IB_QPS_RESET) {
4075                 return is_valid_mask(attr_mask, req, opt);
4076         } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4077                 req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
4078                 return is_valid_mask(attr_mask, req, opt);
4079         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4080                 opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
4081                 return is_valid_mask(attr_mask, req, opt);
4082         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4083                 req |= IB_QP_PATH_MTU;
4084                 opt = IB_QP_PKEY_INDEX | IB_QP_AV;
4085                 return is_valid_mask(attr_mask, req, opt);
4086         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4087                 req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4088                        IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
4089                 opt = IB_QP_MIN_RNR_TIMER;
4090                 return is_valid_mask(attr_mask, req, opt);
4091         } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
4092                 opt = IB_QP_MIN_RNR_TIMER;
4093                 return is_valid_mask(attr_mask, req, opt);
4094         } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
4095                 return is_valid_mask(attr_mask, req, opt);
4096         }
4097         return false;
4098 }
4099
4100 /* mlx5_ib_modify_dct: modify a DCT QP
4101  * valid transitions are:
4102  * RESET to INIT: must set access_flags, pkey_index and port
4103  * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
4104  *                         mtu, gid_index and hop_limit
4105  * Other transitions and attributes are illegal
4106  */
4107 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4108                               int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4109                               struct ib_udata *udata)
4110 {
4111         struct mlx5_ib_qp *qp = to_mqp(ibqp);
4112         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4113         enum ib_qp_state cur_state, new_state;
4114         int err = 0;
4115         int required = IB_QP_STATE;
4116         void *dctc;
4117
4118         if (!(attr_mask & IB_QP_STATE))
4119                 return -EINVAL;
4120
4121         cur_state = qp->state;
4122         new_state = attr->qp_state;
4123
4124         dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
4125         if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
4126                 /*
4127                  * A DCT doesn't initialize the QP until the modify command
4128                  * is executed, so we need to overwrite any previously set
4129                  * ECE field if the user provided a value other than zero
4130                  * (zero means "not set"/"not valid").
4131                  */
4132                 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
4133
4134         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4135                 u16 set_id;
4136
4137                 required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
4138                 if (!is_valid_mask(attr_mask, required, 0))
4139                         return -EINVAL;
4140
4141                 if (attr->port_num == 0 ||
4142                     attr->port_num > dev->num_ports) {
4143                         mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
4144                                     attr->port_num, dev->num_ports);
4145                         return -EINVAL;
4146                 }
4147                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
4148                         MLX5_SET(dctc, dctc, rre, 1);
4149                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
4150                         MLX5_SET(dctc, dctc, rwe, 1);
4151                 if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
4152                         int atomic_mode;
4153
4154                         atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
4155                         if (atomic_mode < 0)
4156                                 return -EOPNOTSUPP;
4157
4158                         MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
4159                         MLX5_SET(dctc, dctc, rae, 1);
4160                 }
4161                 MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
4162                 MLX5_SET(dctc, dctc, port, attr->port_num);
4163
4164                 set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
4165                 MLX5_SET(dctc, dctc, counter_set_id, set_id);
4166         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4167                 struct mlx5_ib_modify_qp_resp resp = {};
4168                 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
4169                 u32 min_resp_len = offsetofend(typeof(resp), dctn);
4170
4171                 if (udata->outlen < min_resp_len)
4172                         return -EINVAL;
4173                 /*
4174                  * If we don't have enough space for the ECE options,
4175                  * simply indicate it with resp.response_length.
4176                  */
4177                 resp.response_length = (udata->outlen < sizeof(resp)) ?
4178                                                min_resp_len :
4179                                                sizeof(resp);
4180
4181                 required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
4182                 if (!is_valid_mask(attr_mask, required, 0))
4183                         return -EINVAL;
4184                 MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
4185                 MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
4186                 MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
4187                 MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
4188                 MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
4189                 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
4190
4191                 err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
4192                                            MLX5_ST_SZ_BYTES(create_dct_in), out,
4193                                            sizeof(out));
4194                 if (err)
4195                         return err;
4196                 resp.dctn = qp->dct.mdct.mqp.qpn;
4197                 if (MLX5_CAP_GEN(dev->mdev, ece_support))
4198                         resp.ece_options = MLX5_GET(create_dct_out, out, ece);
4199                 err = ib_copy_to_udata(udata, &resp, resp.response_length);
4200                 if (err) {
4201                         mlx5_core_destroy_dct(dev, &qp->dct.mdct);
4202                         return err;
4203                 }
4204         } else {
4205                 mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
4206                 return -EINVAL;
4207         }
4208         if (err)
4209                 qp->state = IB_QPS_ERR;
4210         else
4211                 qp->state = new_state;
4212         return err;
4213 }
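/*
 * Illustrative caller sketch (hypothetical, following the transition
 * rules documented above): a DCT is armed with two ib_modify_qp()
 * calls.
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 *				IB_QP_PKEY_INDEX | IB_QP_PORT);
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	attr.min_rnr_timer = 12;
 *	attr.path_mtu = IB_MTU_4096;
 *	attr.ah_attr.grh.sgid_index = 0;  (also set tclass, flow_label, hop_limit)
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_MIN_RNR_TIMER |
 *				IB_QP_AV | IB_QP_PATH_MTU);
 */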
4214
4215 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4216                       int attr_mask, struct ib_udata *udata)
4217 {
4218         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4219         struct mlx5_ib_modify_qp_resp resp = {};
4220         struct mlx5_ib_qp *qp = to_mqp(ibqp);
4221         struct mlx5_ib_modify_qp ucmd = {};
4222         enum ib_qp_type qp_type;
4223         enum ib_qp_state cur_state, new_state;
4224         int err = -EINVAL;
4225         int port;
4226
4227         if (ibqp->rwq_ind_tbl)
4228                 return -EOPNOTSUPP;
4229
4230         if (udata && udata->inlen) {
4231                 if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
4232                         return -EINVAL;
4233
4234                 if (udata->inlen > sizeof(ucmd) &&
4235                     !ib_is_udata_cleared(udata, sizeof(ucmd),
4236                                          udata->inlen - sizeof(ucmd)))
4237                         return -EOPNOTSUPP;
4238
4239                 if (ib_copy_from_udata(&ucmd, udata,
4240                                        min(udata->inlen, sizeof(ucmd))))
4241                         return -EFAULT;
4242
4243                 if (ucmd.comp_mask ||
4244                     memchr_inv(&ucmd.burst_info.reserved, 0,
4245                                sizeof(ucmd.burst_info.reserved)))
4246                         return -EOPNOTSUPP;
4248         }
4249
4250         if (unlikely(ibqp->qp_type == IB_QPT_GSI))
4251                 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
4252
4253         qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
4254                                                                     qp->type;
4255
4256         if (qp_type == MLX5_IB_QPT_DCT)
4257                 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4258
4259         mutex_lock(&qp->mutex);
4260
4261         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4262         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
4263
4264         if (!(cur_state == new_state && cur_state == IB_QPS_RESET))
4265                 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
4267
4268         if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4269                 if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
4270                         mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
4271                                     attr_mask);
4272                         goto out;
4273                 }
4274         } else if (qp_type != MLX5_IB_QPT_REG_UMR &&
4275                    qp_type != MLX5_IB_QPT_DCI &&
4276                    !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
4277                                        attr_mask)) {
4278                 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4279                             cur_state, new_state, qp_type, attr_mask);
4280                 goto out;
4281         } else if (qp_type == MLX5_IB_QPT_DCI &&
4282                    !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
4283                 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4284                             cur_state, new_state, qp_type, attr_mask);
4285                 goto out;
4286         }
4287
4288         if ((attr_mask & IB_QP_PORT) &&
4289             (attr->port_num == 0 ||
4290              attr->port_num > dev->num_ports)) {
4291                 mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
4292                             attr->port_num, dev->num_ports);
4293                 goto out;
4294         }
4295
4296         if (attr_mask & IB_QP_PKEY_INDEX) {
4297                 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
4298                 if (attr->pkey_index >=
4299                     dev->mdev->port_caps[port - 1].pkey_table_len) {
4300                         mlx5_ib_dbg(dev, "invalid pkey index %d\n",
4301                                     attr->pkey_index);
4302                         goto out;
4303                 }
4304         }
4305
4306         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
4307             attr->max_rd_atomic >
4308             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
4309                 mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
4310                             attr->max_rd_atomic);
4311                 goto out;
4312         }
4313
4314         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
4315             attr->max_dest_rd_atomic >
4316             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
4317                 mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
4318                             attr->max_dest_rd_atomic);
4319                 goto out;
4320         }
4321
4322         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
4323                 err = 0;
4324                 goto out;
4325         }
4326
4327         err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
4328                                   new_state, &ucmd, &resp, udata);
4329
4330         /* resp.response_length is set in ECE-supported flows only */
4331         if (!err && resp.response_length &&
4332             udata->outlen >= resp.response_length)
4333                 /* Return -EFAULT to the user and expect it to destroy the QP. */
4334                 err = ib_copy_to_udata(udata, &resp, resp.response_length);
4335
4336 out:
4337         mutex_unlock(&qp->mutex);
4338         return err;
4339 }
4340
4341 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
4342 {
4343         switch (mlx5_state) {
4344         case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
4345         case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
4346         case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
4347         case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
4348         case MLX5_QP_STATE_SQ_DRAINING:
4349         case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
4350         case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
4351         case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
4352         default:                     return -1;
4353         }
4354 }
4355
4356 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
4357 {
4358         switch (mlx5_mig_state) {
4359         case MLX5_QP_PM_ARMED:          return IB_MIG_ARMED;
4360         case MLX5_QP_PM_REARM:          return IB_MIG_REARM;
4361         case MLX5_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
4362         default: return -1;
4363         }
4364 }
4365
4366 static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
4367                             struct rdma_ah_attr *ah_attr, void *path)
4368 {
4369         int port = MLX5_GET(ads, path, vhca_port_num);
4370         int static_rate;
4371
4372         memset(ah_attr, 0, sizeof(*ah_attr));
4373
4374         if (!port || port > ibdev->num_ports)
4375                 return;
4376
4377         ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
4378
4379         rdma_ah_set_port_num(ah_attr, port);
4380         rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
4381
4382         rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
4383         rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
4384
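        /*
         * The device encodes IB static rates with an offset of 5
         * (MLX5_STAT_RATE_OFFSET) over the IB enum; a stat_rate of 0
         * means the rate is not limited.
         */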
4385         static_rate = MLX5_GET(ads, path, stat_rate);
4386         rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
4387         if (MLX5_GET(ads, path, grh) ||
4388             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
4389                 rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
4390                                 MLX5_GET(ads, path, src_addr_index),
4391                                 MLX5_GET(ads, path, hop_limit),
4392                                 MLX5_GET(ads, path, tclass));
4393                 rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
4394         }
4395 }
4396
4397 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
4398                                         struct mlx5_ib_sq *sq,
4399                                         u8 *sq_state)
4400 {
4401         int err;
4402
4403         err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
4404         if (err)
4405                 goto out;
4406         sq->state = *sq_state;
4407
4408 out:
4409         return err;
4410 }
4411
4412 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
4413                                         struct mlx5_ib_rq *rq,
4414                                         u8 *rq_state)
4415 {
4416         void *out;
4417         void *rqc;
4418         int inlen;
4419         int err;
4420
4421         inlen = MLX5_ST_SZ_BYTES(query_rq_out);
4422         out = kvzalloc(inlen, GFP_KERNEL);
4423         if (!out)
4424                 return -ENOMEM;
4425
4426         err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
4427         if (err)
4428                 goto out;
4429
4430         rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
4431         *rq_state = MLX5_GET(rqc, rqc, state);
4432         rq->state = *rq_state;
4433
4434 out:
4435         kvfree(out);
4436         return err;
4437 }
4438
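/*
 * A Raw Packet QP is built from separate SQ and RQ objects, so its IB
 * state must be derived from the (rq_state, sq_state) pair. In the
 * table below, MLX5_QP_STATE means "report the state the driver last
 * cached in qp->state", while MLX5_QP_STATE_BAD marks pairs that must
 * never be observed together.
 */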
4439 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
4440                                   struct mlx5_ib_qp *qp, u8 *qp_state)
4441 {
4442         static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
4443                 [MLX5_RQC_STATE_RST] = {
4444                         [MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
4445                         [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE_BAD,
4446                         [MLX5_SQC_STATE_ERR]    = MLX5_QP_STATE_BAD,
4447                         [MLX5_SQ_STATE_NA]      = IB_QPS_RESET,
4448                 },
4449                 [MLX5_RQC_STATE_RDY] = {
4450                         [MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
4451                         [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE,
4452                         [MLX5_SQC_STATE_ERR]    = IB_QPS_SQE,
4453                         [MLX5_SQ_STATE_NA]      = MLX5_QP_STATE,
4454                 },
4455                 [MLX5_RQC_STATE_ERR] = {
4456                         [MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
4457                         [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE_BAD,
4458                         [MLX5_SQC_STATE_ERR]    = IB_QPS_ERR,
4459                         [MLX5_SQ_STATE_NA]      = IB_QPS_ERR,
4460                 },
4461                 [MLX5_RQ_STATE_NA] = {
4462                         [MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
4463                         [MLX5_SQC_STATE_RDY]    = MLX5_QP_STATE,
4464                         [MLX5_SQC_STATE_ERR]    = MLX5_QP_STATE,
4465                         [MLX5_SQ_STATE_NA]      = MLX5_QP_STATE_BAD,
4466                 },
4467         };
4468
4469         *qp_state = sqrq_trans[rq_state][sq_state];
4470
4471         if (*qp_state == MLX5_QP_STATE_BAD) {
4472                 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
4473                      qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
4474                      qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
4475                 return -EINVAL;
4476         }
4477
4478         if (*qp_state == MLX5_QP_STATE)
4479                 *qp_state = qp->state;
4480
4481         return 0;
4482 }
4483
4484 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
4485                                      struct mlx5_ib_qp *qp,
4486                                      u8 *raw_packet_qp_state)
4487 {
4488         struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
4489         struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
4490         struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
4491         int err;
4492         u8 sq_state = MLX5_SQ_STATE_NA;
4493         u8 rq_state = MLX5_RQ_STATE_NA;
4494
4495         if (qp->sq.wqe_cnt) {
4496                 err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
4497                 if (err)
4498                         return err;
4499         }
4500
4501         if (qp->rq.wqe_cnt) {
4502                 err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
4503                 if (err)
4504                         return err;
4505         }
4506
4507         return sqrq_state_to_qp_state(sq_state, rq_state, qp,
4508                                       raw_packet_qp_state);
4509 }
4510
4511 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
4512                          struct ib_qp_attr *qp_attr)
4513 {
4514         int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
4515         void *qpc, *pri_path, *alt_path;
4516         u32 *outb;
4517         int err;
4518
4519         outb = kzalloc(outlen, GFP_KERNEL);
4520         if (!outb)
4521                 return -ENOMEM;
4522
4523         err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
4524         if (err)
4525                 goto out;
4526
4527         qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
4528
4529         qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
4530         if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
4531                 qp_attr->sq_draining = 1;
4532
4533         qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
4534         qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
4535         qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
4536         qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
4537         qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
4538         qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
4539
4540         if (MLX5_GET(qpc, qpc, rre))
4541                 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
4542         if (MLX5_GET(qpc, qpc, rwe))
4543                 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
4544         if (MLX5_GET(qpc, qpc, rae))
4545                 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
4546
4547         qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
4548         qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
4549         qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
4550         qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
4551         qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
4552
4553         pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
4554         alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
4555
4556         if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
4557                 to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
4558                 to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
4559                 qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
4560                 qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
4561         }
4562
4563         qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
4564         qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
4565         qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
4566         qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
4567
4568 out:
4569         kfree(outb);
4570         return err;
4571 }
4572
4573 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
4574                                 struct ib_qp_attr *qp_attr, int qp_attr_mask,
4575                                 struct ib_qp_init_attr *qp_init_attr)
4576 {
4577         struct mlx5_core_dct    *dct = &mqp->dct.mdct;
4578         u32 *out;
4579         u32 access_flags = 0;
4580         int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
4581         void *dctc;
4582         int err;
4583         int supported_mask = IB_QP_STATE |
4584                              IB_QP_ACCESS_FLAGS |
4585                              IB_QP_PORT |
4586                              IB_QP_MIN_RNR_TIMER |
4587                              IB_QP_AV |
4588                              IB_QP_PATH_MTU |
4589                              IB_QP_PKEY_INDEX;
4590
4591         if (qp_attr_mask & ~supported_mask)
4592                 return -EINVAL;
4593         if (mqp->state != IB_QPS_RTR)
4594                 return -EINVAL;
4595
4596         out = kzalloc(outlen, GFP_KERNEL);
4597         if (!out)
4598                 return -ENOMEM;
4599
4600         err = mlx5_core_dct_query(dev, dct, out, outlen);
4601         if (err)
4602                 goto out;
4603
4604         dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
4605
4606         if (qp_attr_mask & IB_QP_STATE)
4607                 qp_attr->qp_state = IB_QPS_RTR;
4608
4609         if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
4610                 if (MLX5_GET(dctc, dctc, rre))
4611                         access_flags |= IB_ACCESS_REMOTE_READ;
4612                 if (MLX5_GET(dctc, dctc, rwe))
4613                         access_flags |= IB_ACCESS_REMOTE_WRITE;
4614                 if (MLX5_GET(dctc, dctc, rae))
4615                         access_flags |= IB_ACCESS_REMOTE_ATOMIC;
4616                 qp_attr->qp_access_flags = access_flags;
4617         }
4618
4619         if (qp_attr_mask & IB_QP_PORT)
4620                 qp_attr->port_num = MLX5_GET(dctc, dctc, port);
4621         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
4622                 qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
4623         if (qp_attr_mask & IB_QP_AV) {
4624                 qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
4625                 qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
4626                 qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
4627                 qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
4628         }
4629         if (qp_attr_mask & IB_QP_PATH_MTU)
4630                 qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
4631         if (qp_attr_mask & IB_QP_PKEY_INDEX)
4632                 qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
4633 out:
4634         kfree(out);
4635         return err;
4636 }
4637
4638 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4639                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
4640 {
4641         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4642         struct mlx5_ib_qp *qp = to_mqp(ibqp);
4643         int err = 0;
4644         u8 raw_packet_qp_state;
4645
4646         if (ibqp->rwq_ind_tbl)
4647                 return -EOPNOTSUPP;
4648
4649         if (unlikely(ibqp->qp_type == IB_QPT_GSI))
4650                 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
4651                                             qp_init_attr);
4652
4653         /* Not all of the output fields are applicable; make sure to zero them */
4654         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4655         memset(qp_attr, 0, sizeof(*qp_attr));
4656
4657         if (unlikely(qp->type == MLX5_IB_QPT_DCT))
4658                 return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
4659                                             qp_attr_mask, qp_init_attr);
4660
4661         mutex_lock(&qp->mutex);
4662
4663         if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
4664             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4665                 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
4666                 if (err)
4667                         goto out;
4668                 qp->state = raw_packet_qp_state;
4669                 qp_attr->port_num = 1;
4670         } else {
4671                 err = query_qp_attr(dev, qp, qp_attr);
4672                 if (err)
4673                         goto out;
4674         }
4675
4676         qp_attr->qp_state            = qp->state;
4677         qp_attr->cur_qp_state        = qp_attr->qp_state;
4678         qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
4679         qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
4680
4681         if (!ibqp->uobject) {
4682                 qp_attr->cap.max_send_wr  = qp->sq.max_post;
4683                 qp_attr->cap.max_send_sge = qp->sq.max_gs;
4684                 qp_init_attr->qp_context = ibqp->qp_context;
4685         } else {
4686                 qp_attr->cap.max_send_wr  = 0;
4687                 qp_attr->cap.max_send_sge = 0;
4688         }
4689
4690         qp_init_attr->qp_type = ibqp->qp_type;
4691         qp_init_attr->recv_cq = ibqp->recv_cq;
4692         qp_init_attr->send_cq = ibqp->send_cq;
4693         qp_init_attr->srq = ibqp->srq;
4694         qp_attr->cap.max_inline_data = qp->max_inline_data;
4695
4696         qp_init_attr->cap            = qp_attr->cap;
4697
4698         qp_init_attr->create_flags = qp->flags;
4699
4700         qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
4701                 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
4702
4703 out:
4704         mutex_unlock(&qp->mutex);
4705         return err;
4706 }
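/*
 * Illustrative caller sketch (hypothetical): reading back the current
 * state of a QP through the core verbs layer.
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr) &&
 *	    attr.qp_state == IB_QPS_ERR)
 *		recover_qp(qp);		(recover_qp() is a hypothetical helper)
 */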
4707
4708 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
4709                                    struct ib_udata *udata)
4710 {
4711         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4712         struct mlx5_ib_xrcd *xrcd;
4713         int err;
4714
4715         if (!MLX5_CAP_GEN(dev->mdev, xrc))
4716                 return ERR_PTR(-EOPNOTSUPP);
4717
4718         xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
4719         if (!xrcd)
4720                 return ERR_PTR(-ENOMEM);
4721
4722         err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
4723         if (err) {
4724                 kfree(xrcd);
4725                 return ERR_PTR(err);
4726         }
4727
4728         return &xrcd->ibxrcd;
4729 }
4730
4731 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
4732 {
4733         struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
4734         u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
4735         int err;
4736
4737         err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
4738         if (err)
4739                 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
4740
4741         kfree(xrcd);
4742         return 0;
4743 }
4744
4745 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
4746 {
4747         struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
4748         struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
4749         struct ib_event event;
4750
4751         if (rwq->ibwq.event_handler) {
4752                 event.device     = rwq->ibwq.device;
4753                 event.element.wq = &rwq->ibwq;
4754                 switch (type) {
4755                 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
4756                         event.event = IB_EVENT_WQ_FATAL;
4757                         break;
4758                 default:
4759                         mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
4760                         return;
4761                 }
4762
4763                 rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
4764         }
4765 }
4766
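/*
 * Arm the device-wide delay-drop timeout once, on behalf of the first
 * RQ that requests it; rqs_cnt counts the RQs currently relying on it.
 */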
4767 static int set_delay_drop(struct mlx5_ib_dev *dev)
4768 {
4769         int err = 0;
4770
4771         mutex_lock(&dev->delay_drop.lock);
4772         if (dev->delay_drop.activate)
4773                 goto out;
4774
4775         err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
4776         if (err)
4777                 goto out;
4778
4779         dev->delay_drop.activate = true;
4780 out:
4781         mutex_unlock(&dev->delay_drop.lock);
4782
4783         if (!err)
4784                 atomic_inc(&dev->delay_drop.rqs_cnt);
4785         return err;
4786 }
4787
4788 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
4789                      struct ib_wq_init_attr *init_attr)
4790 {
4791         struct mlx5_ib_dev *dev;
4792         int has_net_offloads;
4793         __be64 *rq_pas0;
4794         void *in;
4795         void *rqc;
4796         void *wq;
4797         int inlen;
4798         int err;
4799
4800         dev = to_mdev(pd->device);
4801
4802         inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
4803         in = kvzalloc(inlen, GFP_KERNEL);
4804         if (!in)
4805                 return -ENOMEM;
4806
4807         MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
4808         rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
4809         MLX5_SET(rqc,  rqc, mem_rq_type,
4810                  MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
4811         MLX5_SET(rqc, rqc, user_index, rwq->user_index);
4812         MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
4813         MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
4814         MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
4815         wq = MLX5_ADDR_OF(rqc, rqc, wq);
4816         MLX5_SET(wq, wq, wq_type,
4817                  rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
4818                  MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
4819         if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
4820                 if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
4821                         mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
4822                         err = -EOPNOTSUPP;
4823                         goto out;
4824                 } else {
4825                         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
4826                 }
4827         }
4828         MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
4829         if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
4830                 /*
4831                  * The firmware interprets its log_wqe_num_of_strides field
4832                  * as "512 * 2^field" strides per WQE, so driver log values
4833                  * 9 to 18 are encoded as 0 to 9, while the extended range
4834                  * 3 to 8 is encoded as 10 to 15 (e.g. a driver log value
4835                  * of 9, i.e. 512 strides, is written as 0).
4836                  */
4836                 static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
4837                                              2,  3,  4,  5,  6,  7,  8, 9 };
4838                 MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
4839                 MLX5_SET(wq, wq, log_wqe_stride_size,
4840                          rwq->single_stride_log_num_of_bytes -
4841                          MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
4842                 MLX5_SET(wq, wq, log_wqe_num_of_strides,
4843                          fw_map[rwq->log_num_strides -
4844                                 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
4845         }
4846         MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
4847         MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
4848         MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
4849         MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
4850         MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
4851         MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
4852         has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
4853         if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
4854                 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
4855                         mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
4856                         err = -EOPNOTSUPP;
4857                         goto out;
4858                 }
4859         } else {
4860                 MLX5_SET(rqc, rqc, vsd, 1);
4861         }
4862         if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
4863                 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
4864                         mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
4865                         err = -EOPNOTSUPP;
4866                         goto out;
4867                 }
4868                 MLX5_SET(rqc, rqc, scatter_fcs, 1);
4869         }
4870         if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
4871                 if (!(dev->ib_dev.attrs.raw_packet_caps &
4872                       IB_RAW_PACKET_CAP_DELAY_DROP)) {
4873                         mlx5_ib_dbg(dev, "Delay drop is not supported\n");
4874                         err = -EOPNOTSUPP;
4875                         goto out;
4876                 }
4877                 MLX5_SET(rqc, rqc, delay_drop_en, 1);
4878         }
4879         rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
4880         mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
4881         err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
4882         if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
4883                 err = set_delay_drop(dev);
4884                 if (err) {
4885                         mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
4886                                      err);
4887                         mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
4888                 } else {
4889                         rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
4890                 }
4891         }
4892 out:
4893         kvfree(in);
4894         return err;
4895 }
4896
4897 static int set_user_rq_size(struct mlx5_ib_dev *dev,
4898                             struct ib_wq_init_attr *wq_init_attr,
4899                             struct mlx5_ib_create_wq *ucmd,
4900                             struct mlx5_ib_rwq *rwq)
4901 {
4902         /* Sanity check RQ size before proceeding */
4903         if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
4904                 return -EINVAL;
4905
4906         if (!ucmd->rq_wqe_count)
4907                 return -EINVAL;
4908
4909         rwq->wqe_count = ucmd->rq_wqe_count;
4910         rwq->wqe_shift = ucmd->rq_wqe_shift;
4911         if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
4912                 return -EINVAL;
4913
4914         rwq->log_rq_stride = rwq->wqe_shift;
4915         rwq->log_rq_size = ilog2(rwq->wqe_count);
4916         return 0;
4917 }
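/*
 * Worked example (illustrative values only): rq_wqe_count = 256 and
 * rq_wqe_shift = 6 yield buf_size = 256 << 6 = 16384 bytes,
 * log_rq_stride = 6 and log_rq_size = ilog2(256) = 8.
 */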
4918
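/*
 * When ext_stride_num_range is supported, the accepted minimum drops
 * from MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES to
 * MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES.
 */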
4919 static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
4920 {
4921         if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
4922             (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
4923                 return false;
4924
4925         if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
4926             (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
4927                 return false;
4928
4929         return true;
4930 }
4931
4932 static int prepare_user_rq(struct ib_pd *pd,
4933                            struct ib_wq_init_attr *init_attr,
4934                            struct ib_udata *udata,
4935                            struct mlx5_ib_rwq *rwq)
4936 {
4937         struct mlx5_ib_dev *dev = to_mdev(pd->device);
4938         struct mlx5_ib_create_wq ucmd = {};
4939         int err;
4940         size_t required_cmd_sz;
4941
4942         required_cmd_sz = offsetofend(typeof(ucmd),
4943                                       single_stride_log_num_of_bytes);
4944         if (udata->inlen < required_cmd_sz) {
4945                 mlx5_ib_dbg(dev, "invalid inlen\n");
4946                 return -EINVAL;
4947         }
4948
4949         if (udata->inlen > sizeof(ucmd) &&
4950             !ib_is_udata_cleared(udata, sizeof(ucmd),
4951                                  udata->inlen - sizeof(ucmd))) {
4952                 mlx5_ib_dbg(dev, "inlen is not supported\n");
4953                 return -EOPNOTSUPP;
4954         }
4955
4956         if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
4957                 mlx5_ib_dbg(dev, "copy failed\n");
4958                 return -EFAULT;
4959         }
4960
4961         if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
4962                 mlx5_ib_dbg(dev, "invalid comp mask\n");
4963                 return -EOPNOTSUPP;
4964         } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
4965                 if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
4966                         mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
4967                         return -EOPNOTSUPP;
4968                 }
4969                 if ((ucmd.single_stride_log_num_of_bytes <
4970                     MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
4971                     (ucmd.single_stride_log_num_of_bytes >
4972                      MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
4973                         mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
4974                                     ucmd.single_stride_log_num_of_bytes,
4975                                     MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
4976                                     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
4977                         return -EINVAL;
4978                 }
4979                 if (!log_of_strides_valid(dev,
4980                                           ucmd.single_wqe_log_num_of_strides)) {
4981                         mlx5_ib_dbg(
4982                                 dev,
4983                                 "Invalid log num strides (%u. Range is %u - %u)\n",
4984                                 ucmd.single_wqe_log_num_of_strides,
4985                                 MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
4986                                         MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
4987                                         MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
4988                                 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
4989                         return -EINVAL;
4990                 }
4991                 rwq->single_stride_log_num_of_bytes =
4992                         ucmd.single_stride_log_num_of_bytes;
4993                 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
4994                 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
4995                 rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
4996         }
4997
4998         err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
4999         if (err) {
5000                 mlx5_ib_dbg(dev, "err %d\n", err);
5001                 return err;
5002         }
5003
5004         err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5005         if (err) {
5006                 mlx5_ib_dbg(dev, "err %d\n", err);
5007                 return err;
5008         }
5009
5010         rwq->user_index = ucmd.user_index;
5011         return 0;
5012 }
5013
5014 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
5015                                 struct ib_wq_init_attr *init_attr,
5016                                 struct ib_udata *udata)
5017 {
5018         struct mlx5_ib_dev *dev;
5019         struct mlx5_ib_rwq *rwq;
5020         struct mlx5_ib_create_wq_resp resp = {};
5021         size_t min_resp_len;
5022         int err;
5023
5024         if (!udata)
5025                 return ERR_PTR(-EOPNOTSUPP);
5026
5027         min_resp_len = offsetofend(typeof(resp), reserved);
5028         if (udata->outlen && udata->outlen < min_resp_len)
5029                 return ERR_PTR(-EINVAL);
5030
5031         if (!capable(CAP_SYS_RAWIO) &&
5032             init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
5033                 return ERR_PTR(-EPERM);
5034
5035         dev = to_mdev(pd->device);
5036         switch (init_attr->wq_type) {
5037         case IB_WQT_RQ:
5038                 rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
5039                 if (!rwq)
5040                         return ERR_PTR(-ENOMEM);
5041                 err = prepare_user_rq(pd, init_attr, udata, rwq);
5042                 if (err)
5043                         goto err;
5044                 err = create_rq(rwq, pd, init_attr);
5045                 if (err)
5046                         goto err_user_rq;
5047                 break;
5048         default:
5049                 mlx5_ib_dbg(dev, "unsupported wq type %d\n",
5050                             init_attr->wq_type);
5051                 return ERR_PTR(-EINVAL);
5052         }
5053
5054         rwq->ibwq.wq_num = rwq->core_qp.qpn;
5055         rwq->ibwq.state = IB_WQS_RESET;
5056         if (udata->outlen) {
5057                 resp.response_length = offsetof(typeof(resp), response_length) +
5058                                 sizeof(resp.response_length);
5059                 err = ib_copy_to_udata(udata, &resp, resp.response_length);
5060                 if (err)
5061                         goto err_copy;
5062         }
5063
5064         rwq->core_qp.event = mlx5_ib_wq_event;
5065         rwq->ibwq.event_handler = init_attr->event_handler;
5066         return &rwq->ibwq;
5067
5068 err_copy:
5069         mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5070 err_user_rq:
5071         destroy_user_rq(dev, pd, rwq, udata);
5072 err:
5073         kfree(rwq);
5074         return ERR_PTR(err);
5075 }
5076
5077 void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
5078 {
5079         struct mlx5_ib_dev *dev = to_mdev(wq->device);
5080         struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5081
5082         mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5083         destroy_user_rq(dev, wq->pd, rwq, udata);
5084         kfree(rwq);
5085 }
5086
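/*
 * Create an RSS indirection table (RQT) with 2^log_ind_tbl_size
 * entries, each pointing at one of the caller's WQs by number.
 */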
5087 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
5088                                                       struct ib_rwq_ind_table_init_attr *init_attr,
5089                                                       struct ib_udata *udata)
5090 {
5091         struct mlx5_ib_dev *dev = to_mdev(device);
5092         struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
5093         int sz = 1 << init_attr->log_ind_tbl_size;
5094         struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
5095         size_t min_resp_len;
5096         int inlen;
5097         int err;
5098         int i;
5099         u32 *in;
5100         void *rqtc;
5101
5102         if (udata->inlen > 0 &&
5103             !ib_is_udata_cleared(udata, 0,
5104                                  udata->inlen))
5105                 return ERR_PTR(-EOPNOTSUPP);
5106
5107         if (init_attr->log_ind_tbl_size >
5108             MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
5109                 mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
5110                             init_attr->log_ind_tbl_size,
5111                             MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
5112                 return ERR_PTR(-EINVAL);
5113         }
5114
5115         min_resp_len = offsetofend(typeof(resp), reserved);
5116         if (udata->outlen && udata->outlen < min_resp_len)
5117                 return ERR_PTR(-EINVAL);
5118
5119         rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
5120         if (!rwq_ind_tbl)
5121                 return ERR_PTR(-ENOMEM);
5122
5123         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
5124         in = kvzalloc(inlen, GFP_KERNEL);
5125         if (!in) {
5126                 err = -ENOMEM;
5127                 goto err;
5128         }
5129
5130         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
5131
5132         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5133         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
5134
5135         for (i = 0; i < sz; i++)
5136                 MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
5137
5138         rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
5139         MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
5140
5141         err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
5142         kvfree(in);
5143
5144         if (err)
5145                 goto err;
5146
5147         rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
5148         if (udata->outlen) {
5149                 resp.response_length = offsetof(typeof(resp), response_length) +
5150                                         sizeof(resp.response_length);
5151                 err = ib_copy_to_udata(udata, &resp, resp.response_length);
5152                 if (err)
5153                         goto err_copy;
5154         }
5155
5156         return &rwq_ind_tbl->ib_rwq_ind_tbl;
5157
5158 err_copy:
5159         mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5160 err:
5161         kfree(rwq_ind_tbl);
5162         return ERR_PTR(err);
5163 }
5164
5165 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
5166 {
5167         struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
5168         struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
5169
5170         mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5171
5172         kfree(rwq_ind_tbl);
5173         return 0;
5174 }
5175
5176 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
5177                       u32 wq_attr_mask, struct ib_udata *udata)
5178 {
5179         struct mlx5_ib_dev *dev = to_mdev(wq->device);
5180         struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5181         struct mlx5_ib_modify_wq ucmd = {};
5182         size_t required_cmd_sz;
5183         int curr_wq_state;
5184         int wq_state;
5185         int inlen;
5186         int err;
5187         void *rqc;
5188         void *in;
5189
5190         required_cmd_sz = offsetofend(typeof(ucmd), reserved);
5191         if (udata->inlen < required_cmd_sz)
5192                 return -EINVAL;
5193
5194         if (udata->inlen > sizeof(ucmd) &&
5195             !ib_is_udata_cleared(udata, sizeof(ucmd),
5196                                  udata->inlen - sizeof(ucmd)))
5197                 return -EOPNOTSUPP;
5198
5199         if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
5200                 return -EFAULT;
5201
5202         if (ucmd.comp_mask || ucmd.reserved)
5203                 return -EOPNOTSUPP;
5204
5205         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
5206         in = kvzalloc(inlen, GFP_KERNEL);
5207         if (!in)
5208                 return -ENOMEM;
5209
5210         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
5211
5212         curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
5213                 wq_attr->curr_wq_state : wq->state;
5214         wq_state = (wq_attr_mask & IB_WQ_STATE) ?
5215                 wq_attr->wq_state : curr_wq_state;
5216         if (curr_wq_state == IB_WQS_ERR)
5217                 curr_wq_state = MLX5_RQC_STATE_ERR;
5218         if (wq_state == IB_WQS_ERR)
5219                 wq_state = MLX5_RQC_STATE_ERR;
5220         MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
5221         MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
5222         MLX5_SET(rqc, rqc, state, wq_state);
5223
5224         if (wq_attr_mask & IB_WQ_FLAGS) {
5225                 if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
5226                         if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
5227                               MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5228                                 mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
5230                                 err = -EOPNOTSUPP;
5231                                 goto out;
5232                         }
5233                         MLX5_SET64(modify_rq_in, in, modify_bitmask,
5234                                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
5235                         MLX5_SET(rqc, rqc, vsd,
5236                                  (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
5237                 }
5238
5239                 if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
5240                         mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
5241                         err = -EOPNOTSUPP;
5242                         goto out;
5243                 }
5244         }
5245
5246         if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
5247                 u16 set_id;
5248
5249                 set_id = mlx5_ib_get_counters_id(dev, 0);
5250                 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
5251                         MLX5_SET64(modify_rq_in, in, modify_bitmask,
5252                                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
5253                         MLX5_SET(rqc, rqc, counter_set_id, set_id);
5254                 } else {
5255                         dev_info_once(&dev->ib_dev.dev,
5256                                       "Receive WQ counters are not supported on current FW\n");
5257                 }
5258         }
5259
5260         err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
5261         if (!err)
5262                 rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
5263
5264 out:
5265         kvfree(in);
5266         return err;
5267 }
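/*
 * Illustrative caller sketch (hypothetical): activating a freshly
 * created WQ, which starts in IB_WQS_RESET.
 *
 *	struct ib_wq_attr wq_attr = {};
 *
 *	wq_attr.wq_state = IB_WQS_RDY;
 *	ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
 */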
5268
5269 struct mlx5_ib_drain_cqe {
5270         struct ib_cqe cqe;
5271         struct completion done;
5272 };
5273
5274 static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
5275 {
5276         struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
5277                                                      struct mlx5_ib_drain_cqe,
5278                                                      cqe);
5279
5280         complete(&cqe->done);
5281 }
5282
5283 /* This function returns only once the drain WR has completed */
5284 static void handle_drain_completion(struct ib_cq *cq,
5285                                     struct mlx5_ib_drain_cqe *sdrain,
5286                                     struct mlx5_ib_dev *dev)
5287 {
5288         struct mlx5_core_dev *mdev = dev->mdev;
5289
5290         if (cq->poll_ctx == IB_POLL_DIRECT) {
5291                 while (!wait_for_completion_timeout(&sdrain->done, HZ / 10))
5292                         ib_process_cq_direct(cq, -1);
5293                 return;
5294         }
5295
5296         if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5297                 struct mlx5_ib_cq *mcq = to_mcq(cq);
5298                 bool triggered = false;
5299                 unsigned long flags;
5300
5301                 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
5302                 /* Make sure that the CQ handler won't run if it hasn't run yet */
5303                 if (!mcq->mcq.reset_notify_added)
5304                         mcq->mcq.reset_notify_added = 1;
5305                 else
5306                         triggered = true;
5307                 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
5308
5309                 if (triggered) {
5310                         /* Wait for any scheduled/running task to be ended */
5311                         switch (cq->poll_ctx) {
5312                         case IB_POLL_SOFTIRQ:
5313                                 irq_poll_disable(&cq->iop);
5314                                 irq_poll_enable(&cq->iop);
5315                                 break;
5316                         case IB_POLL_WORKQUEUE:
5317                                 cancel_work_sync(&cq->work);
5318                                 break;
5319                         default:
5320                                 WARN_ON_ONCE(1);
5321                         }
5322                 }
5323
5324                 /* Run the CQ handler - this makes sure that the drain WR will
5325                  * be processed if it wasn't processed yet.
5326                  */
5327                 mcq->mcq.comp(&mcq->mcq, NULL);
5328         }
5329
5330         wait_for_completion(&sdrain->done);
5331 }
5332
5333 void mlx5_ib_drain_sq(struct ib_qp *qp)
5334 {
5335         struct ib_cq *cq = qp->send_cq;
5336         struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5337         struct mlx5_ib_drain_cqe sdrain;
5338         const struct ib_send_wr *bad_swr;
5339         struct ib_rdma_wr swr = {
5340                 .wr = {
5341                         .next = NULL,
5342                         { .wr_cqe       = &sdrain.cqe, },
5343                         .opcode = IB_WR_RDMA_WRITE,
5344                 },
5345         };
5346         int ret;
5347         struct mlx5_ib_dev *dev = to_mdev(qp->device);
5348         struct mlx5_core_dev *mdev = dev->mdev;
5349
5350         ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5351         if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5352                 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
5353                 return;
5354         }
5355
5356         sdrain.cqe.done = mlx5_ib_drain_qp_done;
5357         init_completion(&sdrain.done);
5358
5359         ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
5360         if (ret) {
5361                 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
5362                 return;
5363         }
5364
5365         handle_drain_completion(cq, &sdrain, dev);
5366 }
5367
5368 void mlx5_ib_drain_rq(struct ib_qp *qp)
5369 {
5370         struct ib_cq *cq = qp->recv_cq;
5371         struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5372         struct mlx5_ib_drain_cqe rdrain;
5373         struct ib_recv_wr rwr = {};
5374         const struct ib_recv_wr *bad_rwr;
5375         int ret;
5376         struct mlx5_ib_dev *dev = to_mdev(qp->device);
5377         struct mlx5_core_dev *mdev = dev->mdev;
5378
5379         ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5380         if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5381                 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
5382                 return;
5383         }
5384
5385         rwr.wr_cqe = &rdrain.cqe;
5386         rdrain.cqe.done = mlx5_ib_drain_qp_done;
5387         init_completion(&rdrain.done);
5388
5389         ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
5390         if (ret) {
5391                 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
5392                 return;
5393         }
5394
5395         handle_drain_completion(cq, &rdrain, dev);
5396 }
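/*
 * These helpers back the core ib_drain_sq()/ib_drain_rq() verbs for
 * mlx5 devices, so a typical (hypothetical) teardown looks like:
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */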
5397
5398 /**
5399  * mlx5_ib_qp_set_counter - Bind a QP to a counter
5400  * @qp: the QP to bind
5401  * @counter: counter to bind to, or NULL to bind the QP to the default counter
5402  */
5402 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
5403 {
5404         struct mlx5_ib_dev *dev = to_mdev(qp->device);
5405         struct mlx5_ib_qp *mqp = to_mqp(qp);
5406         int err = 0;
5407
5408         mutex_lock(&mqp->mutex);
5409         if (mqp->state == IB_QPS_RESET) {
5410                 qp->counter = counter;
5411                 goto out;
5412         }
5413
5414         if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
5415                 err = -EOPNOTSUPP;
5416                 goto out;
5417         }
5418
5419         if (mqp->state == IB_QPS_RTS) {
5420                 err = __mlx5_ib_qp_set_counter(qp, counter);
5421                 if (!err)
5422                         qp->counter = counter;
5423
5424                 goto out;
5425         }
5426
5427         mqp->counter_pending = 1;
5428         qp->counter = counter;
5429
5430 out:
5431         mutex_unlock(&mqp->mutex);
5432         return err;
5433 }