drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                            struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The real values of the hns wr opcodes start from 0. In order to
 * distinguish initialized from uninitialized map values, we add 1 to the
 * real value when defining the mapping, so that validity can be checked
 * by testing whether the mapped value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
                [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
        HR_OPC_MAP(RDMA_WRITE,                  RDMA_WRITE),
        HR_OPC_MAP(RDMA_WRITE_WITH_IMM,         RDMA_WRITE_WITH_IMM),
        HR_OPC_MAP(SEND,                        SEND),
        HR_OPC_MAP(SEND_WITH_IMM,               SEND_WITH_IMM),
        HR_OPC_MAP(RDMA_READ,                   RDMA_READ),
        HR_OPC_MAP(ATOMIC_CMP_AND_SWP,          ATOM_CMP_AND_SWAP),
        HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,        ATOM_FETCH_AND_ADD),
        HR_OPC_MAP(SEND_WITH_INV,               SEND_WITH_INV),
        HR_OPC_MAP(LOCAL_INV,                   LOCAL_INV),
        HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,   ATOM_MSK_CMP_AND_SWAP),
        HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
        HR_OPC_MAP(REG_MR,                      FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
        if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
                return HNS_ROCE_V2_WQE_OP_MASK;

        return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
                                             HNS_ROCE_V2_WQE_OP_MASK;
}
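
/*
 * For example, HR_OPC_MAP(SEND, SEND) expands to
 *
 *      [IB_WR_SEND] = 1 + HNS_ROCE_V2_WQE_OP_SEND,
 *
 * so a table slot that was never initialized reads back as 0 and
 * to_hr_opcode() maps it to HNS_ROCE_V2_WQE_OP_MASK (invalid), while an
 * initialized slot is decoded by subtracting the +1 offset again.
 */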

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_wqe_frmr_seg *fseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);
        u64 pbl_ba;

        /* use ib_access_flags */
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
                     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
                     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
                     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
                     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
                     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

        /* The msg_len and inv_key fields are reused here to carry the low
         * and high halves of the PBL base address; don't confuse them with
         * their usual meaning in a send WQE.
         */
        pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
        rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
        rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        fseg->pbl_size = cpu_to_le32(mr->npages);
        roce_set_field(fseg->mode_buf_pg_sz,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
                       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
        roce_set_bit(fseg->mode_buf_pg_sz,
                     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
                           struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                           unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_wqe_atomic_seg *aseg =
                (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

        set_data_seg_v2(dseg, wr->sg_list);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
                aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
        } else {
                aseg->fetchadd_swap_data =
                        cpu_to_le64(atomic_wr(wr)->compare_add);
                aseg->cmp_data = 0;
        }

        roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
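
/*
 * Layout of an atomic RC WQE as built above: the base send WQE is
 * immediately followed by one data segment (the local buffer that receives
 * the original remote value) and then by the atomic segment carrying the
 * swap/compare or add operands:
 *
 *      +--------------------------+  rc_sq_wqe
 *      | hns_roce_v2_rc_send_wqe  |
 *      +--------------------------+  dseg
 *      | hns_roce_v2_wqe_data_seg |
 *      +--------------------------+  aseg
 *      | hns_roce_wqe_atomic_seg  |
 *      +--------------------------+
 */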

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
                                 const struct ib_send_wr *wr,
                                 unsigned int *sge_idx, u32 msg_len)
{
        struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
        unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
        unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
        unsigned int left_len_in_pg;
        unsigned int idx = *sge_idx;
        unsigned int i = 0;
        unsigned int len;
        void *addr;
        void *dseg;

        if (msg_len > ext_sge_sz) {
                ibdev_err(ibdev,
                          "not enough extended sge space for inline data.\n");
                return -EINVAL;
        }

        dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
        left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
        len = wr->sg_list[0].length;
        addr = (void *)(unsigned long)(wr->sg_list[0].addr);

        /* When copying data to the extended sge space, the length left in
         * the current page may not be long enough for the current sge, so
         * the data has to be split into several parts: one in the first
         * page and the others in the subsequent pages.
         */
        while (1) {
                if (len <= left_len_in_pg) {
                        memcpy(dseg, addr, len);

                        idx += len / dseg_len;
                        dseg += len;

                        i++;
                        if (i >= wr->num_sge)
                                break;

                        left_len_in_pg -= len;
                        len = wr->sg_list[i].length;
                        addr = (void *)(unsigned long)(wr->sg_list[i].addr);
                } else {
                        memcpy(dseg, addr, left_len_in_pg);

                        len -= left_len_in_pg;
                        addr += left_len_in_pg;
                        idx += left_len_in_pg / dseg_len;
                        dseg = hns_roce_get_extend_sge(qp,
                                                idx & (qp->sge.sge_cnt - 1));
                        left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
                }
        }

        *sge_idx = idx;

        return 0;
}
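
/*
 * Worked example for the split logic above, assuming a 4KB hardware page
 * (1 << HNS_HW_PAGE_SHIFT): if dseg starts 1KB before a page boundary and
 * the first sge holds 3KB of inline data, the first memcpy writes 1KB into
 * the current page, then dseg is recomputed via hns_roce_get_extend_sge()
 * and the remaining 2KB land at the start of the next page of the extended
 * sge buffer.
 */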

static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                           unsigned int *sge_ind, unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        unsigned int cnt = valid_num_sge;
        struct ib_sge *sge = wr->sg_list;
        unsigned int idx = *sge_ind;

        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
                cnt -= HNS_ROCE_SGE_IN_WQE;
                sge += HNS_ROCE_SGE_IN_WQE;
        }

        while (cnt > 0) {
                dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
                set_data_seg_v2(dseg, sge);
                idx++;
                sge++;
                cnt--;
        }

        *sge_ind = idx;
}
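
/*
 * Note: for RC/UC QPs the first HNS_ROCE_SGE_IN_WQE sges live inside the
 * base send WQE itself (filled by set_rwqe_data_seg() below), so
 * set_extend_sge() skips them and only writes the overflow into the
 * extended sge space.
 */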

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        int mtu = ib_mtu_enum_to_int(qp->path_mtu);

        if (len > qp->max_inline_data || len > mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
                          len, qp->max_inline_data, mtu);
                return false;
        }

        return true;
}

static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                      unsigned int *sge_idx)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        unsigned int curr_idx = *sge_idx;
        void *dseg = rc_sq_wqe;
        unsigned int i;
        int ret;

        if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
                ibdev_err(ibdev, "invalid inline parameters!\n");
                return -EINVAL;
        }

        if (!check_inl_data_len(qp, msg_len))
                return -EINVAL;

        dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);

        if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(dseg, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        dseg += wr->sg_list[i].length;
                }
        } else {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

                ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
                if (ret)
                        return ret;

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
                               curr_idx - *sge_idx);
        }

        *sge_idx = curr_idx;

        return 0;
}
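
/*
 * set_rc_inl() supports two inline modes, selected by the INL_TYPE bit
 * above: when the message fits in HNS_ROCE_V2_MAX_RC_INL_INN_SZ the payload
 * is copied directly after the base WQE (INL_TYPE = 0); otherwise it is
 * copied into the extended sge space via fill_ext_sge_inl_data()
 * (INL_TYPE = 1) and SGE_NUM records how many extended sges were consumed.
 */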

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             unsigned int *sge_ind,
                             unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int j = 0;
        int i;

        roce_set_field(rc_sq_wqe->byte_20,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       (*sge_ind) & (qp->sge.sge_cnt - 1));

        if (wr->send_flags & IB_SEND_INLINE)
                return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

        if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
                for (i = 0; i < wr->num_sge; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                        }
                }
        } else {
                for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                                j++;
                        }
                }

                set_extend_sge(qp, wr, sge_ind, valid_num_sge);
        }

        roce_set_field(rc_sq_wqe->byte_16,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                ibdev_err(ibdev, "unsupported QP type 0x%x!\n",
                          ibqp->qp_type);
                return -EOPNOTSUPP;
        } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
                   hr_qp->state == IB_QPS_INIT ||
                   hr_qp->state == IB_QPS_RTR)) {
                ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
                          hr_qp->state);
                return -EINVAL;
        } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
                ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
                          hr_dev->state);
                return -EIO;
        }

        return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
                                    unsigned int *sge_len)
{
        unsigned int valid_num = 0;
        unsigned int len = 0;
        int i;

        for (i = 0; i < wr->num_sge; i++) {
                if (likely(wr->sg_list[i].length)) {
                        len += wr->sg_list[i].length;
                        valid_num++;
                }
        }

        *sge_len = len;
        return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
        default:
                return 0;
        }
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
                return -EINVAL;

        ud_sq_wqe->immtdata = get_immtdata(wr);

        roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        int valid_num_sge;
        u32 msg_len = 0;
        bool loopback;
        u8 *smac;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);
        memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

        ret = set_ud_opcode(ud_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
                       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
                       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
                       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
                       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);

        /* Enable MAC loopback if the destination MAC is the local MAC */
        smac = (u8 *)hr_dev->dev_addr[qp->port];
        loopback = ether_addr_equal_unaligned(ah->av.mac, smac);

        roce_set_bit(ud_sq_wqe->byte_40,
                     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

        ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

        /* Set the signaled completion attribute */
        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        /* Set the solicited event attribute */
        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
                       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        roce_set_field(ud_sq_wqe->byte_20,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       curr_idx & (qp->sge.sge_cnt - 1));

        roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
        ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                          qp->qkey : ud_wr(wr)->remote_qkey);
        roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
                       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
                       V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);

        roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                     ah->av.vlan_en ? 1 : 0);
        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);

        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);

        set_extend_sge(qp, wr, &curr_idx, valid_num_sge);

        *sge_idx = curr_idx;

        return 0;
}
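
/*
 * Per the IB spec, a Q_Key with the high-order bit set is a controlled
 * Q_Key: the Q_Key supplied in the work request is ignored and the QP's
 * own qkey is used instead, which is what the remote_qkey & 0x80000000
 * test above implements.
 */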

static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        rc_sq_wqe->immtdata = get_immtdata(wr);

        switch (ib_op) {
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
                break;
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                break;
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
                break;
        case IB_WR_REG_MR:
                set_frmr_seg(rc_sq_wqe, reg_wr(wr));
                break;
        case IB_WR_LOCAL_INV:
                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
                fallthrough;
        case IB_WR_SEND_WITH_INV:
                rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
                break;
        default:
                return -EINVAL;
        }

        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return 0;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);
        memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

        rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

        ret = set_rc_opcode(rc_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
        else if (wr->opcode != IB_WR_REG_MR)
                ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
                                        &curr_idx, valid_num_sge);

        *sge_idx = curr_idx;

        return ret;
}

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        /*
         * Hip08 hardware cannot flush the WQEs in SQ if the QP state gets
         * into an error state. Hence, as a workaround to this hardware
         * limitation, the driver needs to assist in flushing. But the
         * flushing operation uses a mailbox to convey the QP state to the
         * hardware, and the mailbox calls can sleep due to the mutex
         * protecting them. Hence, use the deferred flush for now.
         */
        if (qp->state == IB_QPS_ERR) {
                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                        init_flush_work(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};

                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
                               V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
                               V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
                               V2_DB_PARAMETER_IDX_S, qp->sq.head);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);

                hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
        }
}
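
/*
 * The SQ doorbell written above is composed of two 32-bit words: byte_4
 * carries the doorbell QPN in the TAG field plus the doorbell command
 * (HNS_ROCE_V2_SQ_DB), and parameter carries the new producer index
 * (sq.head) together with the service level. hns_roce_write64() then posts
 * both words to the SQ doorbell register (sq.db_reg_l).
 */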

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        unsigned long flags = 0;
        unsigned int owner_bit;
        unsigned int sge_idx;
        unsigned int wqe_idx;
        void *wqe = NULL;
        int nreq;
        int ret;

        spin_lock_irqsave(&qp->sq.lock, flags);

        ret = check_send_valid(hr_dev, qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        sge_idx = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > sq.max_gs = %d\n",
                                  wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_send_wqe(qp, wqe_idx);
                qp->sq.wrid[wqe_idx] = wr->wr_id;
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_GSI)
                        ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
                else if (ibqp->qp_type == IB_QPT_RC)
                        ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

                if (unlikely(ret)) {
                        *bad_wr = wr;
                        goto out;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                qp->next_sge = sge_idx;
                /* Memory barrier */
                wmb();
                update_sq_db(hr_dev, qp);
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
                return -EIO;
        else if (hr_qp->state == IB_QPS_RESET)
                return -EINVAL;

        return 0;
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_rinl_sge *sge_list;
        unsigned long flags;
        void *wqe = NULL;
        u32 wqe_idx;
        int nreq;
        int ret;
        int i;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);

        ret = check_recv_valid(hr_dev, hr_qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
                                                  hr_qp->ibqp.recv_cq))) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > rq.max_gs = %d\n",
                                  wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
                for (i = 0; i < wr->num_sge; i++) {
                        if (!wr->sg_list[i].length)
                                continue;
                        set_data_seg_v2(dseg, wr->sg_list + i);
                        dseg++;
                }

                if (wr->num_sge < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
                }

                /* The RQ supports inline data */
                if (hr_qp->rq_inl_buf.wqe_cnt) {
                        sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
                        hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
                                                               (u32)wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                sge_list[i].addr =
                                               (void *)(u64)wr->sg_list[i].addr;
                                sge_list[i].len = wr->sg_list[i].length;
                        }
                }

                hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Memory barrier */
                wmb();

                /*
                 * Hip08 hardware cannot flush the WQEs in RQ if the QP state
                 * gets into an error state. Hence, as a workaround to this
                 * hardware limitation, the driver needs to assist in
                 * flushing. But the flushing operation uses a mailbox to
                 * convey the QP state to the hardware, and the mailbox calls
                 * can sleep due to the mutex protecting them. Hence, use the
                 * deferred flush for now.
                 */
                if (hr_qp->state == IB_QPS_ERR) {
                        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
                                              &hr_qp->flush_flag))
                                init_flush_work(hr_dev, hr_qp);
                } else {
                        *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
{
        return hns_roce_buf_offset(idx_que->mtr.kmem,
                                   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->tail++;

        spin_unlock(&srq->lock);
}

static int find_empty_entry(struct hns_roce_idx_que *idx_que,
                            unsigned long size)
{
        int wqe_idx;

        if (unlikely(bitmap_full(idx_que->bitmap, size)))
                return -ENOSPC;

        wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

        bitmap_set(idx_que->bitmap, wqe_idx, 1);

        return wqe_idx;
}
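
/*
 * The idx_que bitmap tracks which SRQ WQE slots are owned by hardware:
 * find_empty_entry() picks the first clear bit and sets it when a receive
 * buffer is posted, and hns_roce_free_srq_wqe() clears it again once the
 * WQE has been consumed, so a full bitmap means the SRQ has no free slot.
 */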

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        __le32 *srq_idx;
        int ret = 0;
        int wqe_idx;
        void *wqe;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        ind = srq->head & (srq->wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge >= srq->max_gs)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
                if (unlikely(wqe_idx < 0)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe = get_srq_wqe(srq, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

                for (i = 0; i < wr->num_sge; ++i) {
                        dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
                        dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
                        dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
                }

                if (wr->num_sge < srq->max_gs) {
                        dseg[i].len = 0;
                        dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg[i].addr = 0;
                }

                srq_idx = get_idx_buf(&srq->idx_que, ind);
                *srq_idx = cpu_to_le32(wqe_idx);

                srq->wrid[wqe_idx] = wr->wr_id;
                ind = (ind + 1) & (srq->wqe_cnt - 1);
        }

        if (likely(nreq)) {
                srq->head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                srq_db.byte_4 =
                        cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
                                    (srq->srqn & V2_DB_BYTE_4_TAG_M));
                srq_db.parameter =
                        cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
{
        /* When the hardware reset has completed once or more, we should stop
         * sending mailbox, cmq and doorbell operations to the hardware. If we
         * are currently in the .init_instance() function, we should exit with
         * an error. If we are at the HNAE3_INIT_CLIENT stage of the soft
         * reset process, we should also exit with an error, so that the
         * HNAE3_INIT_CLIENT related process can roll back operations such as
         * notifying the hardware to free resources; that process will then
         * exit with an error to make the NIC driver reschedule the soft reset
         * process once again.
         */
        hr_dev->is_reset = true;
        hr_dev->dis_db = true;

        if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a hardware reset is detected, we should stop sending mailbox,
         * cmq and doorbell operations to the hardware. If we are currently in
         * the .init_instance() function, we should exit with an error. If we
         * are at the HNAE3_INIT_CLIENT stage of the soft reset process, we
         * should also exit with an error, so that the HNAE3_INIT_CLIENT
         * related process can roll back operations such as notifying the
         * hardware to free resources; that process will then exit with an
         * error to make the NIC driver reschedule the soft reset process once
         * again.
         */
        hr_dev->dis_db = true;
        if (!ops->get_hw_reset_stat(handle))
                hr_dev->is_reset = true;

        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a software reset is detected in the .init_instance() function,
         * we should stop sending mailbox, cmq and doorbell operations to the
         * hardware, and exit with an error.
         */
        hr_dev->dis_db = true;
        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
                hr_dev->is_reset = true;

        return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage;   /* the current instance stage */
        unsigned long reset_stage;      /* the current reset stage */
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        if (hr_dev->is_reset)
                return CMD_RST_PRC_SUCCESS;

        /* Get information about resets from the NIC driver or from the RoCE
         * driver itself. The meaning of the variables obtained from the NIC
         * driver is described below:
         * reset_cnt -- the count of completed hardware resets.
         * hw_resetting -- whether the hardware device is resetting now.
         * sw_resetting -- whether the NIC's software reset process is running
         *                 now.
         */
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        hw_resetting = ops->get_cmdq_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);

        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);
        else if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);
        else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);

        return 0;
}
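
/*
 * Summary of the decision above:
 *      - reset_cnt changed        -> a hardware reset has completed
 *        (hns_roce_v2_cmd_hw_reseted)
 *      - hw_resetting             -> a hardware reset is in progress
 *        (hns_roce_v2_cmd_hw_resetting)
 *      - sw_resetting during INIT -> the NIC soft reset is running
 *        (hns_roce_v2_cmd_sw_resetting)
 *      - otherwise                -> no reset activity, proceed with the cmd
 */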

static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}
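
/*
 * Worked example: with desc_num = 1024, next_to_use = 10 and
 * next_to_clean = 5, used = (10 - 5 + 1024) % 1024 = 5, so the ring still
 * has 1024 - 5 - 1 = 1018 free descriptors. One slot is always kept empty
 * so that a full ring can be told apart from an empty one.
 */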

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                           ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int ret;

        /* Setup the queue entries for command queue */
        priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
        priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

        /* Setup the lock for command queue */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Setup Tx write back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret = 0;
        int ntc;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record the location of the descriptors in the CSQ for this
         * submission; it will be used when reading back the results the
         * hardware writes back.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is synchronous, wait for the firmware to write
         * back. If multiple descriptors are sent, use the first one to
         * check for completion.
         */
        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                while (handle < num) {
                        /* get the result of hardware write back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = le16_to_cpu(desc[handle].retval);
                        if (desc_ret == CMD_EXEC_SUCCESS)
                                ret = 0;
                        else
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        int retval;
        int ret;

        ret = hns_roce_v2_rst_process_cmd(hr_dev);
        if (ret == CMD_RST_PRC_SUCCESS)
                return 0;
        if (ret == CMD_RST_PRC_EBUSY)
                return -EBUSY;

        ret = __hns_roce_cmq_send(hr_dev, desc, num);
        if (ret) {
                retval = hns_roce_v2_rst_process_cmd(hr_dev);
                if (retval == CMD_RST_PRC_SUCCESS)
                        return 0;
                else if (retval == CMD_RST_PRC_EBUSY)
                        return -EBUSY;
        }

        return ret;
}
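
/*
 * hns_roce_cmq_send() is the reset-aware wrapper around
 * __hns_roce_cmq_send(): if a reset has already completed, the command is
 * treated as successful (the hardware state is gone anyway); if a reset is
 * in progress, the caller gets -EBUSY; and a failed send is re-checked
 * against the reset state before the error is propagated.
 */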

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        reset_cnt = ops->ae_dev_reset_cnt(handle);
        hw_resetting = ops->get_hw_reset_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);

        if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
                return true;

        return false;
}
1380
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
                                      int flag)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage;
        unsigned long reset_cnt;
        unsigned long end;
        bool sw_resetting;
        bool hw_resetting;

        instance_stage = handle->rinfo.instance_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        hw_resetting = ops->get_hw_reset_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);

        if (reset_cnt != hr_dev->reset_cnt) {
                hr_dev->dis_db = true;
                hr_dev->is_reset = true;
                dev_info(hr_dev->dev, "Func clear success after reset.\n");
        } else if (hw_resetting) {
                hr_dev->dis_db = true;

                dev_warn(hr_dev->dev,
                         "Func clear is pending, device in resetting state.\n");
                end = HNS_ROCE_V2_HW_RST_TIMEOUT;
                while (end) {
                        if (!ops->get_hw_reset_stat(handle)) {
                                hr_dev->is_reset = true;
                                dev_info(hr_dev->dev,
                                         "Func clear success after reset.\n");
                                return;
                        }
                        msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
                        end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
                }

                dev_warn(hr_dev->dev, "Func clear failed.\n");
        } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
                hr_dev->dis_db = true;

                dev_warn(hr_dev->dev,
                         "Func clear is pending, device in resetting state.\n");
                end = HNS_ROCE_V2_HW_RST_TIMEOUT;
                while (end) {
                        if (ops->ae_dev_reset_cnt(handle) !=
                            hr_dev->reset_cnt) {
                                hr_dev->is_reset = true;
                                dev_info(hr_dev->dev,
                                         "Func clear success after sw reset\n");
                                return;
                        }
                        msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
                        end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
                }

                dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
        } else {
                if (retval && !flag)
                        dev_warn(hr_dev->dev,
                                 "Func clear read failed, ret = %d.\n", retval);

                dev_warn(hr_dev->dev, "Func clear failed.\n");
        }
}

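/*
 * Ask firmware to clear the resources still held by this function,
 * then poll FUNC_CLEAR_RST_FUN_DONE until firmware reports completion
 * or the timeout expires; resets observed along the way are resolved
 * by hns_roce_func_clr_rst_prc().
 */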
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
        bool fclr_write_fail_flag = false;
        struct hns_roce_func_clear *resp;
        struct hns_roce_cmq_desc desc;
        unsigned long end;
        int ret = 0;

        if (hns_roce_func_clr_chk_rst(hr_dev))
                goto out;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
        resp = (struct hns_roce_func_clear *)desc.data;

        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret) {
                fclr_write_fail_flag = true;
                dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
                        ret);
                goto out;
        }

        msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
        end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
        while (end) {
                if (hns_roce_func_clr_chk_rst(hr_dev))
                        goto out;
                msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
                end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
                                              true);

                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
                if (ret)
                        continue;

                if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
                        hr_dev->is_reset = true;
                        return;
                }
        }

out:
        hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_fw_info *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_fw_info *)desc.data;
        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

        return 0;
}

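/*
 * Program global RoCE parameters: the 1us timer configuration (0x3e8 =
 * 1000, which suggests a 1ns base tick) and the UDP destination port
 * for RoCEv2 (0x12b7 = 4791, the IANA-assigned port).
 */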
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cfg_global_param *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);

        req = (struct hns_roce_cfg_global_param *)desc.data;
        memset(req, 0, sizeof(*req));
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

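/*
 * Discover how many base-address-table entries (QPC/SRQC/CQC/MPT),
 * service levels and SCC contexts firmware has granted to this PF.
 */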
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_pf_res_a *req_a;
        struct hns_roce_pf_res_b *req_b;
        int ret;
        int i;

        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_PF_RES, true);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;

        req_a = (struct hns_roce_pf_res_a *)desc[0].data;
        req_b = (struct hns_roce_pf_res_b *)desc[1].data;

        hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
        hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
        hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
        hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

        hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
                                             PF_RES_DATA_3_PF_SL_NUM_M,
                                             PF_RES_DATA_3_PF_SL_NUM_S);
        hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
                                             PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
                                             PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

        return 0;
}

static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_pf_timer_res_a *req_a;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
                                      true);

        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        req_a = (struct hns_roce_pf_timer_res_a *)desc.data;

        hr_dev->caps.qpc_timer_bt_num =
                roce_get_field(req_a->qpc_timer_bt_idx_num,
                               PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
                               PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
        hr_dev->caps.cqc_timer_bt_num =
                roce_get_field(req_a->cqc_timer_bt_idx_num,
                               PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
                               PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

        return 0;
}

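/*
 * Read-modify-write the switch parameters of the given function:
 * enable loopback and destination override, keep local loopback
 * disabled.
 */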
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_vf_switch *swt;
        int ret;

        swt = (struct hns_roce_vf_switch *)desc.data;
        hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
        swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
        roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
                       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        desc.flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

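/*
 * Carve out a fixed block of QPC/SRQC/CQC/MPT/EQC base-address-table
 * entries and SMAC/SGID/queue/SCC resources for the VFs, all starting
 * at index 0.
 */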
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_vf_res_a *req_a;
        struct hns_roce_vf_res_b *req_b;
        int i;

        req_a = (struct hns_roce_vf_res_a *)desc[0].data;
        req_b = (struct hns_roce_vf_res_b *)desc[1].data;
        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_ALLOC_VF_RES, false);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        roce_set_field(req_a->vf_qpc_bt_idx_num,
                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
        roce_set_field(req_a->vf_qpc_bt_idx_num,
                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);

        roce_set_field(req_a->vf_srqc_bt_idx_num,
                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
        roce_set_field(req_a->vf_srqc_bt_idx_num,
                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
                       HNS_ROCE_VF_SRQC_BT_NUM);

        roce_set_field(req_a->vf_cqc_bt_idx_num,
                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
        roce_set_field(req_a->vf_cqc_bt_idx_num,
                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);

        roce_set_field(req_a->vf_mpt_bt_idx_num,
                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
        roce_set_field(req_a->vf_mpt_bt_idx_num,
                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);

        roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
                       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
        roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
                       VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);

        roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
                       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
        roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
                       VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);

        roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
                       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
        roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
                       VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);

        roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
                       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
        roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
                       VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);

        roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
                       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
        roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
                       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
                       HNS_ROCE_VF_SCCC_BT_NUM);

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

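/*
 * Configure the base-address-table attributes for each context table
 * (QPC, SRQC, CQC, MPT, SCC): BA page size, buffer page size and hop
 * number, with HNS_ROCE_HOP_NUM_0 encoded as 0 on the wire.
 */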
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
        u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
        u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
        u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
        u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
        u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
        struct hns_roce_cfg_bt_attr *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
        req = (struct hns_roce_cfg_bt_attr *)desc.data;
        memset(req, 0, sizeof(*req));

        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
                       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
                       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
                       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
                       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
                       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
                       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
                       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
                       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
                       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
                       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
                       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
                       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

        roce_set_field(req->vf_sccc_cfg,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
                       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_sccc_cfg,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
                       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_sccc_cfg,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
                       sccc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static void set_default_caps(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_caps *caps = &hr_dev->caps;

        caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
        caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
        caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
        caps->num_srqs          = HNS_ROCE_V2_MAX_SRQ_NUM;
        caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
        caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
        caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
        caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
        caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
        caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
        caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
        caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
        caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
        caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
        caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
        caps->num_srqwqe_segs   = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
        caps->num_idx_segs      = HNS_ROCE_V2_MAX_IDX_SEGS;
        caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
        caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
        caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
        caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
        caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
        caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
        caps->qpc_sz            = HNS_ROCE_V2_QPC_SZ;
        caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
        caps->trrl_entry_sz     = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
        caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
        caps->srqc_entry_sz     = HNS_ROCE_V2_SRQC_ENTRY_SZ;
        caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
        caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
        caps->idx_entry_sz      = HNS_ROCE_V2_IDX_ENTRY_SZ;
        caps->cqe_sz            = HNS_ROCE_V2_CQE_SIZE;
        caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
        caps->reserved_lkey     = 0;
        caps->reserved_pds      = 0;
        caps->reserved_mrws     = 1;
        caps->reserved_uars     = 0;
        caps->reserved_cqs      = 0;
        caps->reserved_srqs     = 0;
        caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;

        caps->qpc_ba_pg_sz      = 0;
        caps->qpc_buf_pg_sz     = 0;
        caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->srqc_ba_pg_sz     = 0;
        caps->srqc_buf_pg_sz    = 0;
        caps->srqc_hop_num      = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->cqc_ba_pg_sz      = 0;
        caps->cqc_buf_pg_sz     = 0;
        caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->mpt_ba_pg_sz      = 0;
        caps->mpt_buf_pg_sz     = 0;
        caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->mtt_ba_pg_sz      = 0;
        caps->mtt_buf_pg_sz     = 0;
        caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
        caps->wqe_sq_hop_num    = HNS_ROCE_SQWQE_HOP_NUM;
        caps->wqe_sge_hop_num   = HNS_ROCE_EXT_SGE_HOP_NUM;
        caps->wqe_rq_hop_num    = HNS_ROCE_RQWQE_HOP_NUM;
        caps->cqe_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
        caps->cqe_buf_pg_sz     = 0;
        caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
        caps->srqwqe_ba_pg_sz   = 0;
        caps->srqwqe_buf_pg_sz  = 0;
        caps->srqwqe_hop_num    = HNS_ROCE_SRQWQE_HOP_NUM;
        caps->idx_ba_pg_sz      = 0;
        caps->idx_buf_pg_sz     = 0;
        caps->idx_hop_num       = HNS_ROCE_IDX_HOP_NUM;
        caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

        caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
                                  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
                                  HNS_ROCE_CAP_FLAG_RQ_INLINE |
                                  HNS_ROCE_CAP_FLAG_RECORD_DB |
                                  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

        caps->pkey_table_len[0] = 1;
        caps->gid_table_len[0]  = HNS_ROCE_V2_GID_INDEX_NUM;
        caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
        caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
        caps->aeqe_size         = HNS_ROCE_AEQE_SIZE;
        caps->ceqe_size         = HNS_ROCE_CEQE_SIZE;
        caps->local_ca_ack_delay = 0;
        caps->max_mtu = IB_MTU_4096;

        caps->max_srq_wrs       = HNS_ROCE_V2_MAX_SRQ_WR;
        caps->max_srq_sges      = HNS_ROCE_V2_MAX_SRQ_SGE;

        caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
                       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
                       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

        caps->num_qpc_timer       = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
        caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
        caps->qpc_timer_ba_pg_sz  = 0;
        caps->qpc_timer_buf_pg_sz = 0;
        caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
        caps->num_cqc_timer       = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
        caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
        caps->cqc_timer_ba_pg_sz  = 0;
        caps->cqc_timer_buf_pg_sz = 0;
        caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;

        caps->sccc_sz             = HNS_ROCE_V2_SCCC_SZ;
        caps->sccc_ba_pg_sz       = 0;
        caps->sccc_buf_pg_sz      = 0;
        caps->sccc_hop_num        = HNS_ROCE_SCCC_HOP_NUM;

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
                caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
                caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
                caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
                caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
        }
}

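/*
 * Work out the buffer and base-address-table page size shifts needed
 * for a HEM table to hold obj_num objects of obj_size bytes with the
 * given hop count; MTT-like tables get the shift in *bt_page_size,
 * context tables in *buf_page_size.
 */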
static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
                       int *buf_page_size, int *bt_page_size, u32 hem_type)
{
        u64 obj_per_chunk;
        u64 bt_chunk_size = PAGE_SIZE;
        u64 buf_chunk_size = PAGE_SIZE;
        u64 obj_per_chunk_default = buf_chunk_size / obj_size;

        *buf_page_size = 0;
        *bt_page_size = 0;

        switch (hop_num) {
        case 3:
                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
                                (bt_chunk_size / BA_BYTE_LEN) *
                                (bt_chunk_size / BA_BYTE_LEN) *
                                 obj_per_chunk_default;
                break;
        case 2:
                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
                                (bt_chunk_size / BA_BYTE_LEN) *
                                 obj_per_chunk_default;
                break;
        case 1:
                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
                                obj_per_chunk_default;
                break;
        case HNS_ROCE_HOP_NUM_0:
                obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
                break;
        default:
                pr_err("Table %u doesn't support hop_num = %d!\n", hem_type,
                       hop_num);
                return;
        }

        if (hem_type >= HEM_TYPE_MTT)
                *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
        else
                *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}

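/*
 * Query the PF capability set from firmware (five chained descriptors)
 * and translate it into hr_dev->caps; the caller falls back to
 * set_default_caps() when this command fails.
 */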
static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
        struct hns_roce_caps *caps = &hr_dev->caps;
        struct hns_roce_query_pf_caps_a *resp_a;
        struct hns_roce_query_pf_caps_b *resp_b;
        struct hns_roce_query_pf_caps_c *resp_c;
        struct hns_roce_query_pf_caps_d *resp_d;
        struct hns_roce_query_pf_caps_e *resp_e;
        int ctx_hop_num;
        int pbl_hop_num;
        int ret;
        int i;

        for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
                                              true);
                if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
        if (ret)
                return ret;

        resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
        resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
        resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
        resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
        resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;

        caps->local_ca_ack_delay     = resp_a->local_ca_ack_delay;
        caps->max_sq_sg              = le16_to_cpu(resp_a->max_sq_sg);
        caps->max_sq_inline          = le16_to_cpu(resp_a->max_sq_inline);
        caps->max_rq_sg              = le16_to_cpu(resp_a->max_rq_sg);
        caps->max_extend_sg          = le32_to_cpu(resp_a->max_extend_sg);
        caps->num_qpc_timer          = le16_to_cpu(resp_a->num_qpc_timer);
        caps->num_cqc_timer          = le16_to_cpu(resp_a->num_cqc_timer);
        caps->max_srq_sges           = le16_to_cpu(resp_a->max_srq_sges);
        caps->num_aeq_vectors        = resp_a->num_aeq_vectors;
        caps->num_other_vectors      = resp_a->num_other_vectors;
        caps->max_sq_desc_sz         = resp_a->max_sq_desc_sz;
        caps->max_rq_desc_sz         = resp_a->max_rq_desc_sz;
        caps->max_srq_desc_sz        = resp_a->max_srq_desc_sz;
        caps->cqe_sz                 = HNS_ROCE_V2_CQE_SIZE;

        caps->mtpt_entry_sz          = resp_b->mtpt_entry_sz;
        caps->irrl_entry_sz          = resp_b->irrl_entry_sz;
        caps->trrl_entry_sz          = resp_b->trrl_entry_sz;
        caps->cqc_entry_sz           = resp_b->cqc_entry_sz;
        caps->srqc_entry_sz          = resp_b->srqc_entry_sz;
        caps->idx_entry_sz           = resp_b->idx_entry_sz;
        caps->sccc_sz                = resp_b->sccc_sz;
        caps->max_mtu                = resp_b->max_mtu;
        caps->qpc_sz                 = HNS_ROCE_V2_QPC_SZ;
        caps->min_cqes               = resp_b->min_cqes;
        caps->min_wqes               = resp_b->min_wqes;
        caps->page_size_cap          = le32_to_cpu(resp_b->page_size_cap);
        caps->pkey_table_len[0]      = resp_b->pkey_table_len;
        caps->phy_num_uars           = resp_b->phy_num_uars;
        ctx_hop_num                  = resp_b->ctx_hop_num;
        pbl_hop_num                  = resp_b->pbl_hop_num;

        caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
                                            V2_QUERY_PF_CAPS_C_NUM_PDS_M,
                                            V2_QUERY_PF_CAPS_C_NUM_PDS_S);
        caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
                                     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
                                     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
        caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
                       HNS_ROCE_CAP_FLAGS_EX_SHIFT;

        caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
                                            V2_QUERY_PF_CAPS_C_NUM_CQS_M,
                                            V2_QUERY_PF_CAPS_C_NUM_CQS_S);
        caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
                                                V2_QUERY_PF_CAPS_C_MAX_GID_M,
                                                V2_QUERY_PF_CAPS_C_MAX_GID_S);
        caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
        caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
                                              V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
                                              V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
        caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
                                            V2_QUERY_PF_CAPS_C_NUM_QPS_M,
                                            V2_QUERY_PF_CAPS_C_NUM_QPS_S);
        caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
                                                V2_QUERY_PF_CAPS_C_MAX_ORD_M,
                                                V2_QUERY_PF_CAPS_C_MAX_ORD_S);
        caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
        caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
        caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
        caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
        caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
        caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
        caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
        caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
                                            V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
                                            V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
        caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
                                            V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
                                            V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
        caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
                                            V2_QUERY_PF_CAPS_D_RSV_PDS_M,
                                            V2_QUERY_PF_CAPS_D_RSV_PDS_S);
        caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
                                             V2_QUERY_PF_CAPS_D_NUM_UARS_M,
                                             V2_QUERY_PF_CAPS_D_NUM_UARS_S);
        caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
                                            V2_QUERY_PF_CAPS_D_RSV_QPS_M,
                                            V2_QUERY_PF_CAPS_D_RSV_QPS_S);
        caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
                                             V2_QUERY_PF_CAPS_D_RSV_UARS_M,
                                             V2_QUERY_PF_CAPS_D_RSV_UARS_S);
        caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
                                             V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
                                             V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
        caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
                                         V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
                                         V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
        caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
                                            V2_QUERY_PF_CAPS_E_RSV_CQS_M,
                                            V2_QUERY_PF_CAPS_E_RSV_CQS_S);
        caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
                                             V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
                                             V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
        caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
                                             V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
                                             V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
        caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
        caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
        caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
        caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);

        caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
        caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
        caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
        caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
        caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
        caps->mtt_ba_pg_sz = 0;
        caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
        caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
        caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;

        caps->qpc_hop_num = ctx_hop_num;
        caps->srqc_hop_num = ctx_hop_num;
        caps->cqc_hop_num = ctx_hop_num;
        caps->mpt_hop_num = ctx_hop_num;
        caps->mtt_hop_num = pbl_hop_num;
        caps->cqe_hop_num = pbl_hop_num;
        caps->srqwqe_hop_num = pbl_hop_num;
        caps->idx_hop_num = pbl_hop_num;
        caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
                                          V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
                                          V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
        caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
                                          V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
                                          V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
        caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
                                          V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
                                          V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
                caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
                caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
                caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
                caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
                caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
        }

        calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
                   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
                   HEM_TYPE_QPC);
        calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
                   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
                   HEM_TYPE_MTPT);
        calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
                   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
                   HEM_TYPE_CQC);
        calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
                   caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
                   &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);

        caps->sccc_hop_num = ctx_hop_num;
        caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
        caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

        calc_pg_sz(caps->num_qps, caps->sccc_sz,
                   caps->sccc_hop_num, caps->sccc_bt_num,
                   &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
                   HEM_TYPE_SCCC);
        calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
                   caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
                   &caps->cqc_timer_buf_pg_sz,
                   &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);

        calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
                   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
        calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
                   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
                   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
        calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
                   1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);

        return 0;
}

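/*
 * On HIP09 and later, the QPC and SCC context entry sizes are
 * configurable; the two helpers below push the sizes chosen in
 * hr_dev->caps down to firmware, and hns_roce_config_entry_size()
 * skips the step entirely on older revisions.
 */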
static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cfg_entry_size *cfg_size =
                                  (struct hns_roce_cfg_entry_size *)desc.data;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
                                      false);

        cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
        cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cfg_entry_size *cfg_size =
                                  (struct hns_roce_cfg_entry_size *)desc.data;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
                                      false);

        cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
        cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
{
        int ret;

        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
                return 0;

        ret = hns_roce_config_qpc_size(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
                return ret;
        }

        ret = hns_roce_config_sccc_size(hr_dev);
        if (ret)
                dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);

        return ret;
}

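/*
 * Profile the device at init time: query hardware/firmware versions,
 * program global parameters, discover the resources and capabilities
 * granted to this function, and configure the context table layout.
 */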
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;

        ret = hns_roce_cmq_query_hw_info(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query hardware version failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_query_fw_ver(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query firmware version failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_config_global_param(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param failed, ret = %d.\n",
                        ret);
                return ret;
        }

        /* Get the resources owned by this pf */
        ret = hns_roce_query_pf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query pf resource failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_query_pf_timer_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev,
                        "failed to query pf timer resource, ret = %d.\n", ret);
                return ret;
        }

        ret = hns_roce_set_vf_switch_param(hr_dev, 0);
        if (ret) {
                dev_err(hr_dev->dev,
                        "failed to set function switch param, ret = %d.\n",
                        ret);
                return ret;
        }

        hr_dev->vendor_part_id = hr_dev->pci_dev->device;
        hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

        caps->pbl_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
        caps->pbl_buf_pg_sz     = 0;
        caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
        caps->eqe_ba_pg_sz      = 0;
        caps->eqe_buf_pg_sz     = 0;
        caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
        caps->tsq_buf_pg_sz     = 0;

        ret = hns_roce_query_pf_caps(hr_dev);
        if (ret)
                set_default_caps(hr_dev);

        ret = hns_roce_alloc_vf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Allocate vf resource failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_v2_set_bt(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev,
                        "Configure bt attribute failed, ret = %d.\n", ret);
                return ret;
        }

        /* Configure the size of QPC, SCCC, etc. */
        ret = hns_roce_config_entry_size(hr_dev);

        return ret;
}

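/*
 * Push a previously built link table to the hardware: the base address
 * of the entry array, queue depth and page size, and the head/tail
 * block addresses of the page chain. TSQ tables are configured with
 * HNS_ROCE_OPC_CFG_EXT_LLM, TPQ tables with HNS_ROCE_OPC_CFG_TMOUT_LLM.
 */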
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
                                      enum hns_roce_link_table_type type)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_cfg_llm_a *req_a =
                                (struct hns_roce_cfg_llm_a *)desc[0].data;
        struct hns_roce_cfg_llm_b *req_b =
                                (struct hns_roce_cfg_llm_b *)desc[1].data;
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_link_table *link_tbl;
        struct hns_roce_link_table_entry *entry;
        enum hns_roce_opcode_type opcode;
        u32 page_num;
        int i;

        switch (type) {
        case TSQ_LINK_TABLE:
                link_tbl = &priv->tsq;
                opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
                break;
        case TPQ_LINK_TABLE:
                link_tbl = &priv->tpq;
                opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
                break;
        default:
                return -EINVAL;
        }

        page_num = link_tbl->npages;
        entry = link_tbl->table.buf;

        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
        req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
        roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
                       CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
        roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
                       CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
        roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
                       CFG_LLM_INIT_EN_S, 1);
        req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
        req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
        roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
                       0);

        req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
        roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
                       CFG_LLM_TAIL_BA_H_S,
                       entry[page_num - 1].blk_ba1_nxt_ptr &
                       HNS_ROCE_LINK_TABLE_BA1_M);
        roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
                       (entry[page_num - 2].blk_ba1_nxt_ptr &
                        HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
                        HNS_ROCE_LINK_TABLE_NXT_PTR_S);

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

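/*
 * Build a link table: allocate an array of entries plus one DMA page
 * per entry, chain the pages together through the next-pointer field
 * of each entry, then hand the table to hardware. TSQ sizing scales
 * with the QP and SL counts, TPQ sizing with the CQ and function
 * counts.
 */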
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
                                    enum hns_roce_link_table_type type)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_link_table *link_tbl;
        struct hns_roce_link_table_entry *entry;
        struct device *dev = hr_dev->dev;
        u32 buf_chk_sz;
        dma_addr_t t;
        int func_num = 1;
        int pg_num_a;
        int pg_num_b;
        int pg_num;
        int size;
        int i;

        switch (type) {
        case TSQ_LINK_TABLE:
                link_tbl = &priv->tsq;
                buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
                pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
                pg_num_b = hr_dev->caps.sl_num * 4 + 2;
                break;
        case TPQ_LINK_TABLE:
                link_tbl = &priv->tpq;
                buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
                pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
                pg_num_b = 2 * 4 * func_num + 2;
                break;
        default:
                return -EINVAL;
        }

        pg_num = max(pg_num_a, pg_num_b);
        size = pg_num * sizeof(struct hns_roce_link_table_entry);

        link_tbl->table.buf = dma_alloc_coherent(dev, size,
                                                 &link_tbl->table.map,
                                                 GFP_KERNEL);
        if (!link_tbl->table.buf)
                goto out;

        link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
                                    GFP_KERNEL);
        if (!link_tbl->pg_list)
                goto err_kcalloc_failed;

        entry = link_tbl->table.buf;
        for (i = 0; i < pg_num; ++i) {
                link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
                                                              &t, GFP_KERNEL);
                if (!link_tbl->pg_list[i].buf)
                        goto err_alloc_buf_failed;

                link_tbl->pg_list[i].map = t;

                entry[i].blk_ba0 = (u32)(t >> 12);
                entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);

                if (i < (pg_num - 1))
                        entry[i].blk_ba1_nxt_ptr |=
                                (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
        }
        link_tbl->npages = pg_num;
        link_tbl->pg_sz = buf_chk_sz;

        return hns_roce_config_link_table(hr_dev, type);

err_alloc_buf_failed:
        for (i -= 1; i >= 0; i--)
                dma_free_coherent(dev, buf_chk_sz,
                                  link_tbl->pg_list[i].buf,
                                  link_tbl->pg_list[i].map);
        kfree(link_tbl->pg_list);

err_kcalloc_failed:
        dma_free_coherent(dev, size, link_tbl->table.buf,
                          link_tbl->table.map);

out:
        return -ENOMEM;
}

static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
                                     struct hns_roce_link_table *link_tbl)
{
        struct device *dev = hr_dev->dev;
        int size;
        int i;

        size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);

        for (i = 0; i < link_tbl->npages; ++i)
                if (link_tbl->pg_list[i].buf)
                        dma_free_coherent(dev, link_tbl->pg_sz,
                                          link_tbl->pg_list[i].buf,
                                          link_tbl->pg_list[i].map);
        kfree(link_tbl->pg_list);

        dma_free_coherent(dev, size, link_tbl->table.buf,
                          link_tbl->table.map);
}

static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int qpc_count, cqc_count;
        int ret, i;

        /* TSQ includes SQ doorbell and ack doorbell */
        ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
        if (ret) {
                dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
                return ret;
        }

        ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
        if (ret) {
                dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
                goto err_tpq_init_failed;
        }

        /* Alloc memory for QPC Timer buffer space chunk */
        for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
             qpc_count++) {
                ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
                                         qpc_count);
                if (ret) {
                        dev_err(hr_dev->dev, "QPC Timer get failed\n");
                        goto err_qpc_timer_failed;
                }
        }

        /* Alloc memory for CQC Timer buffer space chunk */
        for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
             cqc_count++) {
                ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
                                         cqc_count);
                if (ret) {
                        dev_err(hr_dev->dev, "CQC Timer get failed\n");
                        goto err_cqc_timer_failed;
                }
        }

        return 0;

err_cqc_timer_failed:
        for (i = 0; i < cqc_count; i++)
                hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
        for (i = 0; i < qpc_count; i++)
                hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

        hns_roce_free_link_table(hr_dev, &priv->tpq);

err_tpq_init_failed:
        hns_roce_free_link_table(hr_dev, &priv->tsq);

        return ret;
}

static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        hns_roce_function_clear(hr_dev);

        hns_roce_free_link_table(hr_dev, &priv->tpq);
        hns_roce_free_link_table(hr_dev, &priv->tsq);
}

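/*
 * Mailbox plumbing: commands that target a specific object (QP, CQ,
 * MPT, ...) go through a mailbox rather than a plain command-queue
 * descriptor. The status word read back below carries both the
 * "hardware running" bit and the completion status of the last
 * mailbox.
 */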
static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_mbox_status *mb_st =
                                       (struct hns_roce_mbox_status *)desc.data;
        int status;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);

        status = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (status)
                return status;

        return le32_to_cpu(mb_st->mb_status_hw_run);
}

static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
        u32 status = hns_roce_query_mbox_status(hr_dev);

        return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}

static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
        u32 status = hns_roce_query_mbox_status(hr_dev);

        return status & HNS_ROCE_HW_MB_STATUS_MASK;
}

2567 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2568                               u64 out_param, u32 in_modifier, u8 op_modifier,
2569                               u16 op, u16 token, int event)
2570 {
2571         struct hns_roce_cmq_desc desc;
2572         struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2573
2574         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2575
2576         mb->in_param_l = cpu_to_le32(in_param);
2577         mb->in_param_h = cpu_to_le32(in_param >> 32);
2578         mb->out_param_l = cpu_to_le32(out_param);
2579         mb->out_param_h = cpu_to_le32(out_param >> 32);
2580         mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2581         mb->token_event_en = cpu_to_le32(event << 16 | token);
2582
2583         return hns_roce_cmq_send(hr_dev, &desc, 1);
2584 }
2585
2586 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2587                                  u64 out_param, u32 in_modifier, u8 op_modifier,
2588                                  u16 op, u16 token, int event)
2589 {
2590         struct device *dev = hr_dev->dev;
2591         unsigned long end;
2592         int ret;
2593
2594         end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2595         while (hns_roce_v2_cmd_pending(hr_dev)) {
2596                 if (time_after(jiffies, end)) {
2597                         dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2598                                 (int)end);
2599                         return -EAGAIN;
2600                 }
2601                 cond_resched();
2602         }
2603
2604         ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2605                                  op_modifier, op, token, event);
2606         if (ret)
2607                 dev_err(dev, "failed to post mailbox, ret = %d.\n", ret);
2608
2609         return ret;
2610 }
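
/*
 * Editorial aside -- a minimal, self-contained sketch (not driver code) of
 * the deadline-polling shape used by hns_roce_v2_post_mbox() above. The
 * busy() callback stands in for hns_roce_v2_cmd_pending(), and a
 * millisecond clock replaces the jiffies arithmetic; the driver also calls
 * cond_resched() inside the loop, omitted here.
 */
#include <stdbool.h>
#include <time.h>

static long long sketch_now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll until busy() clears or the deadline passes (-1 maps to -EAGAIN). */
static int sketch_wait_idle(bool (*busy)(void), unsigned int timeout_ms)
{
        long long deadline = sketch_now_ms() + timeout_ms;

        while (busy()) {
                if (sketch_now_ms() > deadline)
                        return -1;
        }
        return 0;
}
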
2611
2612 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2613                                 unsigned long timeout)
2614 {
2615         struct device *dev = hr_dev->dev;
2616         unsigned long end;
2617         u32 status;
2618
2619         end = msecs_to_jiffies(timeout) + jiffies;
2620         while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2621                 cond_resched();
2622
2623         if (hns_roce_v2_cmd_pending(hr_dev)) {
2624                 dev_err(dev, "[cmd_poll] mailbox command timed out!\n");
2625                 return -ETIMEDOUT;
2626         }
2627
2628         status = hns_roce_v2_cmd_complete(hr_dev);
2629         if (status != 0x1) {
2630                 if (status == CMD_RST_PRC_EBUSY)
2631                         return status;
2632
2633                 dev_err(dev, "mailbox status 0x%x!\n", status);
2634                 return -EBUSY;
2635         }
2636
2637         return 0;
2638 }
2639
2640 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2641                                       int gid_index, const union ib_gid *gid,
2642                                       enum hns_roce_sgid_type sgid_type)
2643 {
2644         struct hns_roce_cmq_desc desc;
2645         struct hns_roce_cfg_sgid_tb *sgid_tb =
2646                                     (struct hns_roce_cfg_sgid_tb *)desc.data;
2647         u32 *p;
2648
2649         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2650
2651         roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2652                        CFG_SGID_TB_TABLE_IDX_S, gid_index);
2653         roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2654                        CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2655
2656         p = (u32 *)&gid->raw[0];
2657         sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2658
2659         p = (u32 *)&gid->raw[4];
2660         sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2661
2662         p = (u32 *)&gid->raw[8];
2663         sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2664
2665         p = (u32 *)&gid->raw[0xc];
2666         sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2667
2668         return hns_roce_cmq_send(hr_dev, &desc, 1);
2669 }
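
/*
 * Editorial aside -- hns_roce_config_sgid_table() above splits the 16-byte
 * GID into four 32-bit words (vf_sgid_l .. vf_sgid_h). A hypothetical
 * helper showing the same split; memcpy() sidesteps the alignment and
 * strict-aliasing concerns of casting &gid->raw[n] to u32 *.
 */
#include <stdint.h>
#include <string.h>

static void sketch_gid_to_words(const uint8_t raw[16], uint32_t w[4])
{
        int i;

        for (i = 0; i < 4; i++)
                memcpy(&w[i], &raw[4 * i], sizeof(w[i]));
}
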
2670
2671 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2672                                int gid_index, const union ib_gid *gid,
2673                                const struct ib_gid_attr *attr)
2674 {
2675         enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2676         int ret;
2677
2678         if (!gid || !attr)
2679                 return -EINVAL;
2680
2681         if (attr->gid_type == IB_GID_TYPE_ROCE)
2682                 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2683
2684         if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2685                 if (ipv6_addr_v4mapped((void *)gid))
2686                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2687                 else
2688                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2689         }
2690
2691         ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2692         if (ret)
2693                 ibdev_err(&hr_dev->ib_dev,
2694                           "failed to configure sgid table, ret = %d!\n",
2695                           ret);
2696
2697         return ret;
2698 }
2699
2700 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2701                                u8 *addr)
2702 {
2703         struct hns_roce_cmq_desc desc;
2704         struct hns_roce_cfg_smac_tb *smac_tb =
2705                                     (struct hns_roce_cfg_smac_tb *)desc.data;
2706         u16 reg_smac_h;
2707         u32 reg_smac_l;
2708
2709         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2710
2711         reg_smac_l = *(u32 *)(&addr[0]);
2712         reg_smac_h = *(u16 *)(&addr[4]);
2713
2714         roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
2715                        CFG_SMAC_TB_IDX_S, phy_port);
2716         roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
2717                        CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2718         smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2719
2720         return hns_roce_cmq_send(hr_dev, &desc, 1);
2721 }
2722
2723 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
2724                         struct hns_roce_v2_mpt_entry *mpt_entry,
2725                         struct hns_roce_mr *mr)
2726 {
2727         u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
2728         struct ib_device *ibdev = &hr_dev->ib_dev;
2729         dma_addr_t pbl_ba;
2730         int i, count;
2731
2732         count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
2733                                   ARRAY_SIZE(pages), &pbl_ba);
2734         if (count < 1) {
2735                 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
2736                           count);
2737                 return -ENOBUFS;
2738         }
2739
2740         /* Aligned to the hardware address access unit (64 bytes), so drop the low 6 bits */
2741         for (i = 0; i < count; i++)
2742                 pages[i] >>= 6;
2743
2744         mpt_entry->pbl_size = cpu_to_le32(mr->npages);
2745         mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
2746         roce_set_field(mpt_entry->byte_48_mode_ba,
2747                        V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2748                        upper_32_bits(pbl_ba >> 3));
2749
2750         mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2751         roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2752                        V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2753
2754         mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2755         roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2756                        V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2757         roce_set_field(mpt_entry->byte_64_buf_pa1,
2758                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2759                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2760                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
2761
2762         return 0;
2763 }
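
/*
 * Editorial aside -- set_mtpt_pbl() stores the PBL base address shifted
 * right by 3 and split across a 32-bit low word and a high bit-field. A
 * sketch of that split and its inverse; the round trip is exact only for
 * 8-byte-aligned addresses.
 */
#include <stdint.h>

static void sketch_pack_pbl_ba(uint64_t ba, uint32_t *lo, uint32_t *hi)
{
        uint64_t v = ba >> 3;

        *lo = (uint32_t)v;         /* pbl_ba_l           */
        *hi = (uint32_t)(v >> 32); /* PBL_BA_H bit-field */
}

static uint64_t sketch_unpack_pbl_ba(uint32_t lo, uint32_t hi)
{
        return (((uint64_t)hi << 32) | lo) << 3;
}
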
2764
2765 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
2766                                   void *mb_buf, struct hns_roce_mr *mr,
2767                                   unsigned long mtpt_idx)
2768 {
2769         struct hns_roce_v2_mpt_entry *mpt_entry;
2770         int ret;
2771
2772         mpt_entry = mb_buf;
2773         memset(mpt_entry, 0, sizeof(*mpt_entry));
2774
2775         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2776                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2777         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2778                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2779                        HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2780         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2781                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2782                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2783                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2784         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2785                        V2_MPT_BYTE_4_PD_S, mr->pd);
2786
2787         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2788         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2789         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2790         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2791                      (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2792         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2793                      mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2794         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2795                      (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2796         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2797                      (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2798         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2799                      (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2800
2801         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2802                      mr->type == MR_TYPE_MR ? 0 : 1);
2803         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2804                      1);
2805
2806         mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2807         mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2808         mpt_entry->lkey = cpu_to_le32(mr->key);
2809         mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2810         mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2811
2812         if (mr->type == MR_TYPE_DMA)
2813                 return 0;
2814
2815         ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2816
2817         return ret;
2818 }
2819
2820 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2821                                         struct hns_roce_mr *mr, int flags,
2822                                         u32 pdn, int mr_access_flags, u64 iova,
2823                                         u64 size, void *mb_buf)
2824 {
2825         struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2826         int ret = 0;
2827
2828         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2829                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2830
2831         if (flags & IB_MR_REREG_PD) {
2832                 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2833                                V2_MPT_BYTE_4_PD_S, pdn);
2834                 mr->pd = pdn;
2835         }
2836
2837         if (flags & IB_MR_REREG_ACCESS) {
2838                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2839                              V2_MPT_BYTE_8_BIND_EN_S,
2840                              (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2841                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2842                              V2_MPT_BYTE_8_ATOMIC_EN_S,
2843                              mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2844                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2845                              mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2846                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2847                              mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2848                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2849                              mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2850         }
2851
2852         if (flags & IB_MR_REREG_TRANS) {
2853                 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2854                 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2855                 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2856                 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2857
2858                 mr->iova = iova;
2859                 mr->size = size;
2860
2861                 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2862         }
2863
2864         return ret;
2865 }
2866
2867 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
2868                                        void *mb_buf, struct hns_roce_mr *mr)
2869 {
2870         struct ib_device *ibdev = &hr_dev->ib_dev;
2871         struct hns_roce_v2_mpt_entry *mpt_entry;
2872         dma_addr_t pbl_ba = 0;
2873
2874         mpt_entry = mb_buf;
2875         memset(mpt_entry, 0, sizeof(*mpt_entry));
2876
2877         if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
2878                 ibdev_err(ibdev, "failed to find frmr mtr.\n");
2879                 return -ENOBUFS;
2880         }
2881
2882         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2883                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2884         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2885                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2886         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2887                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2888                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2889                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2890         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2891                        V2_MPT_BYTE_4_PD_S, mr->pd);
2892
2893         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2894         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2895         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2896
2897         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2898         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2899         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2900         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2901
2902         mpt_entry->pbl_size = cpu_to_le32(mr->npages);
2903
2904         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
2905         roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2906                        V2_MPT_BYTE_48_PBL_BA_H_S,
2907                        upper_32_bits(pbl_ba >> 3));
2908
2909         roce_set_field(mpt_entry->byte_64_buf_pa1,
2910                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2911                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2912                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
2913
2914         return 0;
2915 }
2916
2917 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2918 {
2919         struct hns_roce_v2_mpt_entry *mpt_entry;
2920
2921         mpt_entry = mb_buf;
2922         memset(mpt_entry, 0, sizeof(*mpt_entry));
2923
2924         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2925                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2926         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2927                        V2_MPT_BYTE_4_PD_S, mw->pdn);
2928         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2929                        V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2930                        mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
2931                                                                mw->pbl_hop_num);
2932         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2933                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2934                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2935                        mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2936
2937         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2938         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2939
2940         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2941         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2942         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2943         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2944                      mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2945
2946         roce_set_field(mpt_entry->byte_64_buf_pa1,
2947                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2948                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2949                        mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2950
2951         mpt_entry->lkey = cpu_to_le32(mw->rkey);
2952
2953         return 0;
2954 }
2955
2956 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2957 {
2958         return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
2959 }
2960
2961 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2962 {
2963         struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2964
2965         /* A CQE is valid when its owner bit is the inverse of the cons_idx wrap bit */
2966         return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2967                 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
2968 }
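
/*
 * Editorial aside -- the owner-bit test in get_sw_cqe_v2() above, reduced
 * to its core. On a power-of-two ring of cq_depth entries, the expected
 * phase flips each time the unmasked consumer index wraps, so an entry
 * belongs to software when its owner bit differs from the wrap parity.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_cqe_is_sw_owned(uint32_t ci, uint32_t cq_depth,
                                   bool owner_bit)
{
        bool wrap_parity = (ci & cq_depth) != 0;

        return owner_bit ^ wrap_parity;
}
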
2969
2970 static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
2971 {
2972         *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
2973 }
2974
2975 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2976                                    struct hns_roce_srq *srq)
2977 {
2978         struct hns_roce_v2_cqe *cqe, *dest;
2979         u32 prod_index;
2980         int nfreed = 0;
2981         int wqe_index;
2982         u8 owner_bit;
2983
2984         for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2985              ++prod_index) {
2986                 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2987                         break;
2988         }
2989
2990         /*
2991          * Now walk backwards through the CQ, removing entries that match
2992          * our QP by overwriting them with the entries that follow.
2993          */
2994         while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2995                 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2996                 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2997                                     V2_CQE_BYTE_16_LCL_QPN_S) &
2998                                     HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2999                         if (srq &&
3000                             roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
3001                                 wqe_index = roce_get_field(cqe->byte_4,
3002                                                      V2_CQE_BYTE_4_WQE_INDX_M,
3003                                                      V2_CQE_BYTE_4_WQE_INDX_S);
3004                                 hns_roce_free_srq_wqe(srq, wqe_index);
3005                         }
3006                         ++nfreed;
3007                 } else if (nfreed) {
3008                         dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3009                                           hr_cq->ib_cq.cqe);
3010                         owner_bit = roce_get_bit(dest->byte_4,
3011                                                  V2_CQE_BYTE_4_OWNER_S);
3012                         memcpy(dest, cqe, sizeof(*cqe));
3013                         roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
3014                                      owner_bit);
3015                 }
3016         }
3017
3018         if (nfreed) {
3019                 hr_cq->cons_index += nfreed;
3020                 /*
3021                  * Make sure update of buffer contents is done before
3022                  * updating consumer index.
3023                  */
3024                 wmb();
3025                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3026         }
3027 }
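
/*
 * Editorial aside -- the compaction idea in __hns_roce_v2_cq_clean() as a
 * self-contained sketch on a ring of u32 "entries" (mask = size - 1).
 * Walking backwards from head - 1, each surviving entry is shifted up by
 * the number of removed entries seen so far; the real code additionally
 * preserves the destination CQE's owner bit when copying.
 */
#include <stdint.h>

static uint32_t sketch_ring_purge(uint32_t *ring, uint32_t tail,
                                  uint32_t head, uint32_t mask,
                                  uint32_t doomed)
{
        uint32_t nfreed = 0;
        uint32_t i;

        for (i = head; (int32_t)(--i - tail) >= 0; ) {
                if (ring[i & mask] == doomed)
                        nfreed++;
                else if (nfreed)
                        ring[(i + nfreed) & mask] = ring[i & mask];
        }

        return tail + nfreed; /* the new consumer index */
}
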
3028
3029 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3030                                  struct hns_roce_srq *srq)
3031 {
3032         spin_lock_irq(&hr_cq->lock);
3033         __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3034         spin_unlock_irq(&hr_cq->lock);
3035 }
3036
3037 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3038                                   struct hns_roce_cq *hr_cq, void *mb_buf,
3039                                   u64 *mtts, dma_addr_t dma_handle)
3040 {
3041         struct hns_roce_v2_cq_context *cq_context;
3042
3043         cq_context = mb_buf;
3044         memset(cq_context, 0, sizeof(*cq_context));
3045
3046         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
3047                        V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
3048         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
3049                        V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
3050         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
3051                        V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
3052         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
3053                        V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
3054
3055         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
3056                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
3057
3058         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
3059                        V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
3060                        HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
3061
3062         cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
3063
3064         roce_set_field(cq_context->byte_16_hop_addr,
3065                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
3066                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
3067                        upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3068         roce_set_field(cq_context->byte_16_hop_addr,
3069                        V2_CQC_BYTE_16_CQE_HOP_NUM_M,
3070                        V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
3071                        HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3072
3073         cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
3074         roce_set_field(cq_context->byte_24_pgsz_addr,
3075                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
3076                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
3077                        upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3078         roce_set_field(cq_context->byte_24_pgsz_addr,
3079                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
3080                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
3081                        to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3082         roce_set_field(cq_context->byte_24_pgsz_addr,
3083                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
3084                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
3085                        to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3086
3087         cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
3088
3089         roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
3090                        V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
3091
3092         roce_set_bit(cq_context->byte_44_db_record,
3093                      V2_CQC_BYTE_44_DB_RECORD_EN_S,
3094                      (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
3095
3096         roce_set_field(cq_context->byte_44_db_record,
3097                        V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
3098                        V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
3099                        ((u32)hr_cq->db.dma) >> 1);
3100         cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
3101
3102         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3103                        V2_CQC_BYTE_56_CQ_MAX_CNT_M,
3104                        V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3105                        HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3106         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3107                        V2_CQC_BYTE_56_CQ_PERIOD_M,
3108                        V2_CQC_BYTE_56_CQ_PERIOD_S,
3109                        HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3110 }
3111
3112 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3113                                      enum ib_cq_notify_flags flags)
3114 {
3115         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3116         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3117         u32 notification_flag;
3118         __le32 doorbell[2];
3119
3120         doorbell[0] = 0;
3121         doorbell[1] = 0;
3122
3123         notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3124                              V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3125         /*
3126          * flags = 0: Notification Flag = 1, next
3127          * flags = 1: Notification Flag = 0, solicited
3128          */
3129         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
3130                        hr_cq->cqn);
3131         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
3132                        HNS_ROCE_V2_CQ_DB_NTR);
3133         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
3134                        V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
3135         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
3136                        V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
3137         roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
3138                      notification_flag);
3139
3140         hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
3141
3142         return 0;
3143 }
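
/*
 * Editorial aside -- hns_roce_v2_req_notify_cq() assembles a two-word
 * doorbell and writes it as a single 64-bit value. A sketch of that packing
 * with plain shifts; the field positions below are illustrative only, not
 * the exact hardware layout encoded by the V2_CQ_DB_* masks above.
 */
#include <stdint.h>

static uint64_t sketch_build_cq_db(uint32_t cqn, uint32_t cmd, uint32_t ci,
                                   uint32_t cmd_sn, int notify)
{
        uint32_t w0 = (cqn & 0xffffff) | (cmd << 24);          /* tag + cmd */
        uint32_t w1 = (ci & 0xffffff) | ((cmd_sn & 0x3) << 25) |
                      ((uint32_t)!!notify << 24);

        return ((uint64_t)w1 << 32) | w0;
}
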
3144
3145 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3146                                                     struct hns_roce_qp **cur_qp,
3147                                                     struct ib_wc *wc)
3148 {
3149         struct hns_roce_rinl_sge *sge_list;
3150         u32 wr_num, wr_cnt, sge_num;
3151         u32 sge_cnt, data_len, size;
3152         void *wqe_buf;
3153
3154         wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
3155                                 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
3156         wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
3157
3158         sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3159         sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3160         wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
3161         data_len = wc->byte_len;
3162
3163         for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3164                 size = min(sge_list[sge_cnt].len, data_len);
3165                 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3166
3167                 data_len -= size;
3168                 wqe_buf += size;
3169         }
3170
3171         if (unlikely(data_len)) {
3172                 wc->status = IB_WC_LOC_LEN_ERR;
3173                 return -EAGAIN;
3174         }
3175
3176         return 0;
3177 }
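
/*
 * Editorial aside -- the inline-receive copy loop in
 * hns_roce_handle_recv_inl_wqe() above, generalized: scatter one
 * contiguous payload across an SGE list in order and report how many
 * bytes did not fit (non-zero maps to IB_WC_LOC_LEN_ERR).
 */
#include <stdint.h>
#include <string.h>

struct sketch_sge {
        void *addr;
        uint32_t len;
};

static uint32_t sketch_scatter_copy(const void *src, uint32_t len,
                                    struct sketch_sge *sge,
                                    unsigned int nsge)
{
        unsigned int i;

        for (i = 0; i < nsge && len; i++) {
                uint32_t chunk = sge[i].len < len ? sge[i].len : len;

                memcpy(sge[i].addr, src, chunk);
                src = (const uint8_t *)src + chunk;
                len -= chunk;
        }

        return len;
}
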
3178
3179 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3180                    int num_entries, struct ib_wc *wc)
3181 {
3182         unsigned int left;
3183         int npolled = 0;
3184
3185         left = wq->head - wq->tail;
3186         if (left == 0)
3187                 return 0;
3188
3189         left = min_t(unsigned int, (unsigned int)num_entries, left);
3190         while (npolled < left) {
3191                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3192                 wc->status = IB_WC_WR_FLUSH_ERR;
3193                 wc->vendor_err = 0;
3194                 wc->qp = &hr_qp->ibqp;
3195
3196                 wq->tail++;
3197                 wc++;
3198                 npolled++;
3199         }
3200
3201         return npolled;
3202 }
3203
3204 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3205                                   struct ib_wc *wc)
3206 {
3207         struct hns_roce_qp *hr_qp;
3208         int npolled = 0;
3209
3210         list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3211                 npolled += sw_comp(hr_qp, &hr_qp->sq,
3212                                    num_entries - npolled, wc + npolled);
3213                 if (npolled >= num_entries)
3214                         goto out;
3215         }
3216
3217         list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3218                 npolled += sw_comp(hr_qp, &hr_qp->rq,
3219                                    num_entries - npolled, wc + npolled);
3220                 if (npolled >= num_entries)
3221                         goto out;
3222         }
3223
3224 out:
3225         return npolled;
3226 }
3227
3228 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3229                            struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3230                            struct ib_wc *wc)
3231 {
3232         static const struct {
3233                 u32 cqe_status;
3234                 enum ib_wc_status wc_status;
3235         } map[] = {
3236                 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3237                 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3238                 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3239                 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3240                 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3241                 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3242                 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3243                 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3244                 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3245                 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3246                 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3247                 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3248                   IB_WC_RETRY_EXC_ERR },
3249                 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3250                 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3251                 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3252         };
3253
3254         u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3255                                         V2_CQE_BYTE_4_STATUS_S);
3256         int i;
3257
3258         wc->status = IB_WC_GENERAL_ERR;
3259         for (i = 0; i < ARRAY_SIZE(map); i++)
3260                 if (cqe_status == map[i].cqe_status) {
3261                         wc->status = map[i].wc_status;
3262                         break;
3263                 }
3264
3265         if (likely(wc->status == IB_WC_SUCCESS ||
3266                    wc->status == IB_WC_WR_FLUSH_ERR))
3267                 return;
3268
3269         ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3270         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3271                        cq->cqe_size, false);
3272
3273         /*
3274          * For hns ROCEE, GENERAL_ERR is an error type that is not defined
3275          * in the standard protocol; the driver must ignore it and need not
3276          * move the QP to an error state.
3277          */
3278         if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3279                 return;
3280
3281         /*
3282          * HIP08 hardware cannot flush the WQEs in the SQ/RQ when the QP
3283          * enters the error state, so as a workaround the driver must
3284          * assist in flushing. But the flush operation conveys the QP
3285          * state to the hardware through a mailbox, and the mailbox calls
3286          * can sleep because of the mutex protecting them. Hence, use a
3287          * deferred flush for now: once a WC error is detected, schedule
3288          * the flush work.
3289          */
3290         if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
3291                 init_flush_work(hr_dev, qp);
3292 }
3293
3294 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3295                                 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3296 {
3297         struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3298         struct hns_roce_srq *srq = NULL;
3299         struct hns_roce_v2_cqe *cqe;
3300         struct hns_roce_qp *hr_qp;
3301         struct hns_roce_wq *wq;
3302         int is_send;
3303         u16 wqe_ctr;
3304         u32 opcode;
3305         int qpn;
3306         int ret;
3307
3308         /* Find cqe according to consumer index */
3309         cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3310         if (!cqe)
3311                 return -EAGAIN;
3312
3313         ++hr_cq->cons_index;
3314         /* Make sure the CQE fields are read only after the owner bit check */
3315         rmb();
3316
3317         /* 0->SQ, 1->RQ */
3318         is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
3319
3320         qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3321                                 V2_CQE_BYTE_16_LCL_QPN_S);
3322
3323         if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
3324                 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3325                 if (unlikely(!hr_qp)) {
3326                         ibdev_err(&hr_dev->ib_dev,
3327                                   "CQ %06lx with entry for unknown QPN %06x\n",
3328                                   hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
3329                         return -EINVAL;
3330                 }
3331                 *cur_qp = hr_qp;
3332         }
3333
3334         wc->qp = &(*cur_qp)->ibqp;
3335         wc->vendor_err = 0;
3336
3337         if (is_send) {
3338                 wq = &(*cur_qp)->sq;
3339                 if ((*cur_qp)->sq_signal_bits) {
3340                         /*
3341                          * If sq_signal_bits is set, first move the
3342                          * tail pointer to the WQE that the current
3343                          * CQE corresponds to.
3344                          */
3345                         wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3346                                                       V2_CQE_BYTE_4_WQE_INDX_M,
3347                                                       V2_CQE_BYTE_4_WQE_INDX_S);
3348                         wq->tail += (wqe_ctr - (u16)wq->tail) &
3349                                     (wq->wqe_cnt - 1);
3350                 }
3351
3352                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3353                 ++wq->tail;
3354         } else if ((*cur_qp)->ibqp.srq) {
3355                 srq = to_hr_srq((*cur_qp)->ibqp.srq);
3356                 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3357                                               V2_CQE_BYTE_4_WQE_INDX_M,
3358                                               V2_CQE_BYTE_4_WQE_INDX_S);
3359                 wc->wr_id = srq->wrid[wqe_ctr];
3360                 hns_roce_free_srq_wqe(srq, wqe_ctr);
3361         } else {
3362                 /* Update tail pointer, record wr_id */
3363                 wq = &(*cur_qp)->rq;
3364                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3365                 ++wq->tail;
3366         }
3367
3368         get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
3369         if (unlikely(wc->status != IB_WC_SUCCESS))
3370                 return 0;
3371
3372         if (is_send) {
3373                 wc->wc_flags = 0;
3374                 /* SQ completion corresponding to the CQE */
3375                 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3376                                        V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
3377                 case HNS_ROCE_V2_WQE_OP_SEND:
3378                 case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
3379                         wc->opcode = IB_WC_SEND;
3380                         break;
3383                 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3384                         wc->opcode = IB_WC_SEND;
3385                         wc->wc_flags |= IB_WC_WITH_IMM;
3386                         break;
3387                 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3388                         wc->opcode = IB_WC_RDMA_READ;
3389                         wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3390                         break;
3391                 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
3392                         wc->opcode = IB_WC_RDMA_WRITE;
3393                         break;
3394                 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3395                         wc->opcode = IB_WC_RDMA_WRITE;
3396                         wc->wc_flags |= IB_WC_WITH_IMM;
3397                         break;
3398                 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3399                         wc->opcode = IB_WC_LOCAL_INV;
3400                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3401                         break;
3402                 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3403                         wc->opcode = IB_WC_COMP_SWAP;
3404                         wc->byte_len  = 8;
3405                         break;
3406                 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3407                         wc->opcode = IB_WC_FETCH_ADD;
3408                         wc->byte_len  = 8;
3409                         break;
3410                 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3411                         wc->opcode = IB_WC_MASKED_COMP_SWAP;
3412                         wc->byte_len  = 8;
3413                         break;
3414                 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3415                         wc->opcode = IB_WC_MASKED_FETCH_ADD;
3416                         wc->byte_len  = 8;
3417                         break;
3418                 case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
3419                 case HNS_ROCE_V2_WQE_OP_BIND_MW:
3420                         wc->opcode = IB_WC_REG_MR;
3421                         break;
3424                 default:
3425                         wc->status = IB_WC_GENERAL_ERR;
3426                         break;
3427                 }
3428         } else {
3429                 /* RQ completion corresponding to the CQE */
3430                 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3431
3432                 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3433                                         V2_CQE_BYTE_4_OPCODE_S);
3434                 switch (opcode & 0x1f) {
3435                 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3436                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3437                         wc->wc_flags = IB_WC_WITH_IMM;
3438                         wc->ex.imm_data =
3439                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3440                         break;
3441                 case HNS_ROCE_V2_OPCODE_SEND:
3442                         wc->opcode = IB_WC_RECV;
3443                         wc->wc_flags = 0;
3444                         break;
3445                 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3446                         wc->opcode = IB_WC_RECV;
3447                         wc->wc_flags = IB_WC_WITH_IMM;
3448                         wc->ex.imm_data =
3449                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3450                         break;
3451                 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3452                         wc->opcode = IB_WC_RECV;
3453                         wc->wc_flags = IB_WC_WITH_INVALIDATE;
3454                         wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3455                         break;
3456                 default:
3457                         wc->status = IB_WC_GENERAL_ERR;
3458                         break;
3459                 }
3460
3461                 if ((wc->qp->qp_type == IB_QPT_RC ||
3462                      wc->qp->qp_type == IB_QPT_UC) &&
3463                     (opcode == HNS_ROCE_V2_OPCODE_SEND ||
3464                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3465                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3466                     (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
3467                         ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
3468                         if (unlikely(ret))
3469                                 return -EAGAIN;
3470                 }
3471
3472                 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3473                                             V2_CQE_BYTE_32_SL_S);
3474                 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
3475                                                 V2_CQE_BYTE_32_RMT_QPN_M,
3476                                                 V2_CQE_BYTE_32_RMT_QPN_S);
3477                 wc->slid = 0;
3478                 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
3479                                               V2_CQE_BYTE_32_GRH_S) ?
3480                                               IB_WC_GRH : 0);
3481                 wc->port_num = roce_get_field(cqe->byte_32,
3482                                 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
3483                 wc->pkey_index = 0;
3484
3485                 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3486                         wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
3487                                                           V2_CQE_BYTE_28_VID_M,
3488                                                           V2_CQE_BYTE_28_VID_S);
3489                         wc->wc_flags |= IB_WC_WITH_VLAN;
3490                 } else {
3491                         wc->vlan_id = 0xffff;
3492                 }
3493
3494                 wc->network_hdr_type = roce_get_field(cqe->byte_28,
3495                                                     V2_CQE_BYTE_28_PORT_TYPE_M,
3496                                                     V2_CQE_BYTE_28_PORT_TYPE_S);
3497         }
3498
3499         return 0;
3500 }
3501
3502 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3503                                struct ib_wc *wc)
3504 {
3505         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3506         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3507         struct hns_roce_qp *cur_qp = NULL;
3508         unsigned long flags;
3509         int npolled;
3510
3511         spin_lock_irqsave(&hr_cq->lock, flags);
3512
3513         /*
3514          * When the device starts to reset, its state is RST_DOWN. At this
3515          * point the hardware may still hold valid CQEs that have not been
3516          * polled, so switching to software mode immediately is not
3517          * allowed. Once the state changes to UNINIT, no CQEs remain in
3518          * the hardware, and the driver can then switch to software mode.
3519          */
3520         if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3521                 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3522                 goto out;
3523         }
3524
3525         for (npolled = 0; npolled < num_entries; ++npolled) {
3526                 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3527                         break;
3528         }
3529
3530         if (npolled) {
3531                 /* Make sure CQE reads are complete before updating the consumer index */
3532                 wmb();
3533                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3534         }
3535
3536 out:
3537         spin_unlock_irqrestore(&hr_cq->lock, flags);
3538
3539         return npolled;
3540 }
3541
3542 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3543                               int step_idx)
3544 {
3545         int op;
3546
3547         if (type == HEM_TYPE_SCCC && step_idx)
3548                 return -EINVAL;
3549
3550         switch (type) {
3551         case HEM_TYPE_QPC:
3552                 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3553                 break;
3554         case HEM_TYPE_MTPT:
3555                 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3556                 break;
3557         case HEM_TYPE_CQC:
3558                 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3559                 break;
3560         case HEM_TYPE_SRQC:
3561                 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3562                 break;
3563         case HEM_TYPE_SCCC:
3564                 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3565                 break;
3566         case HEM_TYPE_QPC_TIMER:
3567                 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3568                 break;
3569         case HEM_TYPE_CQC_TIMER:
3570                 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3571                 break;
3572         default:
3573                 dev_warn(hr_dev->dev,
3574                          "Table %d is not written by mailbox!\n", type);
3575                 return -EINVAL;
3576         }
3577
3578         return op + step_idx;
3579 }
3580
3581 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
3582                          u32 hem_type, int step_idx)
3583 {
3584         struct hns_roce_cmd_mailbox *mailbox;
3585         int ret;
3586         int op;
3587
3588         op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
3589         if (op < 0)
3590                 return 0;
3591
3592         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3593         if (IS_ERR(mailbox))
3594                 return PTR_ERR(mailbox);
3595
3596         ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3597                                 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3598
3599         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3600
3601         return ret;
3602 }
3603
3604 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3605                                struct hns_roce_hem_table *table, int obj,
3606                                int step_idx)
3607 {
3608         struct hns_roce_hem_iter iter;
3609         struct hns_roce_hem_mhop mhop;
3610         struct hns_roce_hem *hem;
3611         unsigned long mhop_obj = obj;
3612         int i, j, k;
3613         int ret = 0;
3614         u64 hem_idx = 0;
3615         u64 l1_idx = 0;
3616         u64 bt_ba = 0;
3617         u32 chunk_ba_num;
3618         u32 hop_num;
3619
3620         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3621                 return 0;
3622
3623         hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3624         i = mhop.l0_idx;
3625         j = mhop.l1_idx;
3626         k = mhop.l2_idx;
3627         hop_num = mhop.hop_num;
3628         chunk_ba_num = mhop.bt_chunk_size / 8;
3629
3630         if (hop_num == 2) {
3631                 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3632                           k;
3633                 l1_idx = i * chunk_ba_num + j;
3634         } else if (hop_num == 1) {
3635                 hem_idx = i * chunk_ba_num + j;
3636         } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3637                 hem_idx = i;
3638         }
3639
3640         if (table->type == HEM_TYPE_SCCC)
3641                 obj = mhop.l0_idx;
3642
3643         if (check_whether_last_step(hop_num, step_idx)) {
3644                 hem = table->hem[hem_idx];
3645                 for (hns_roce_hem_first(hem, &iter);
3646                      !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3647                         bt_ba = hns_roce_hem_addr(&iter);
3648                         ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
3649                                             step_idx);
3650                 }
3651         } else {
3652                 if (step_idx == 0)
3653                         bt_ba = table->bt_l0_dma_addr[i];
3654                 else if (step_idx == 1 && hop_num == 2)
3655                         bt_ba = table->bt_l1_dma_addr[l1_idx];
3656
3657                 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
3658         }
3659
3660         return ret;
3661 }
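
/*
 * Editorial aside -- the multi-hop index flattening used by
 * hns_roce_v2_set_hem() above, with n = chunk_ba_num entries per
 * base-address chunk: a (l0, l1, l2) position becomes a single linear
 * HEM index in base n.
 */
#include <stdint.h>

static uint64_t sketch_hem_index(uint32_t hop_num, uint64_t i, uint64_t j,
                                 uint64_t k, uint64_t n)
{
        switch (hop_num) {
        case 2:
                return i * n * n + j * n + k;
        case 1:
                return i * n + j;
        default: /* hop 0: the table is addressed directly */
                return i;
        }
}
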
3662
3663 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3664                                  struct hns_roce_hem_table *table, int obj,
3665                                  int step_idx)
3666 {
3667         struct device *dev = hr_dev->dev;
3668         struct hns_roce_cmd_mailbox *mailbox;
3669         int ret;
3670         u16 op = 0xff;
3671
3672         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3673                 return 0;
3674
3675         switch (table->type) {
3676         case HEM_TYPE_QPC:
3677                 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3678                 break;
3679         case HEM_TYPE_MTPT:
3680                 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3681                 break;
3682         case HEM_TYPE_CQC:
3683                 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3684                 break;
3685         case HEM_TYPE_SCCC:
3686         case HEM_TYPE_QPC_TIMER:
3687         case HEM_TYPE_CQC_TIMER:
3688                 break;
3689         case HEM_TYPE_SRQC:
3690                 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3691                 break;
3692         default:
3693                 dev_warn(dev, "Table %d is not destroyed by mailbox!\n",
3694                          table->type);
3695                 return 0;
3696         }
3697
3698         if (table->type == HEM_TYPE_SCCC ||
3699             table->type == HEM_TYPE_QPC_TIMER ||
3700             table->type == HEM_TYPE_CQC_TIMER)
3701                 return 0;
3702
3703         op += step_idx;
3704
3705         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3706         if (IS_ERR(mailbox))
3707                 return PTR_ERR(mailbox);
3708
3709         /* configure the tag and op */
3710         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3711                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3712
3713         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3714         return ret;
3715 }
3716
3717 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3718                                  struct hns_roce_v2_qp_context *context,
3719                                  struct hns_roce_v2_qp_context *qpc_mask,
3720                                  struct hns_roce_qp *hr_qp)
3721 {
3722         struct hns_roce_cmd_mailbox *mailbox;
3723         int qpc_size;
3724         int ret;
3725
3726         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3727         if (IS_ERR(mailbox))
3728                 return PTR_ERR(mailbox);
3729
3730         /* The QPC size of HIP08 is only 256 B, half that of HIP09 */
3731         qpc_size = hr_dev->caps.qpc_sz;
3732         memcpy(mailbox->buf, context, qpc_size);
3733         memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
3734
3735         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3736                                 HNS_ROCE_CMD_MODIFY_QPC,
3737                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3738
3739         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3740
3741         return ret;
3742 }
3743
3744 static void set_access_flags(struct hns_roce_qp *hr_qp,
3745                              struct hns_roce_v2_qp_context *context,
3746                              struct hns_roce_v2_qp_context *qpc_mask,
3747                              const struct ib_qp_attr *attr, int attr_mask)
3748 {
3749         u8 dest_rd_atomic;
3750         u32 access_flags;
3751
3752         dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3753                          attr->max_dest_rd_atomic : hr_qp->resp_depth;
3754
3755         access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3756                        attr->qp_access_flags : hr_qp->atomic_rd_en;
3757
3758         if (!dest_rd_atomic)
3759                 access_flags &= IB_ACCESS_REMOTE_WRITE;
3760
3761         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3762                      !!(access_flags & IB_ACCESS_REMOTE_READ));
3763         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3764
3765         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3766                      !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3767         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3768
3769         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3770                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3771         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3772         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
3773                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3774         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
3775 }
3776
3777 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3778                             struct hns_roce_v2_qp_context *context,
3779                             struct hns_roce_v2_qp_context *qpc_mask)
3780 {
3781         roce_set_field(context->byte_4_sqpn_tst,
3782                        V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
3783                        to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
3784                                                hr_qp->sge.sge_shift));
3785
3786         roce_set_field(context->byte_20_smac_sgid_idx,
3787                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3788                        ilog2(hr_qp->sq.wqe_cnt));
3789
3790         roce_set_field(context->byte_20_smac_sgid_idx,
3791                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3792                        ilog2(hr_qp->rq.wqe_cnt));
3793 }
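
/*
 * Editorial aside -- set_qpc_wqe_cnt() stores log2 of the (power-of-two)
 * WQE counts in the QPC. The kernel provides ilog2() via <linux/log2.h>;
 * a portable sketch of the same computation:
 */
#include <stdint.h>

static unsigned int sketch_ilog2(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;

        return r; /* e.g. sketch_ilog2(256) == 8 */
}
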
3794
3795 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3796                                     const struct ib_qp_attr *attr,
3797                                     int attr_mask,
3798                                     struct hns_roce_v2_qp_context *context,
3799                                     struct hns_roce_v2_qp_context *qpc_mask)
3800 {
3801         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3802         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3803
3804         /*
3805          * In the v2 engine, software passes both a context and a context
3806          * mask to the hardware when modifying a QP. To modify a field of
3807          * the context, software must clear all bits of that field in the
3808          * context mask; mask bits left at 0x1 keep the current value.
3809          */
3810         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3811                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3812
3813         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3814                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3815
3816         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3817                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3818
3819         roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3820                        V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3821
3822         set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3823
3824         /* Without VLAN, the VLAN ID must be set to 0xfff */
3825         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3826                        V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3827
3828         if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
3829                 roce_set_bit(context->byte_68_rq_db,
3830                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3831
3832         roce_set_field(context->byte_68_rq_db,
3833                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3834                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3835                        ((u32)hr_qp->rdb.dma) >> 1);
3836         context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
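        /*
         * Worked example (address assumed): for rdb.dma = 0x200001000 the
         * low word 0x00001000 is stored right-shifted by one as 0x800, and
         * the high word 0x2 goes into rq_db_record_addr, letting hardware
         * reassemble the 64-bit doorbell record address.
         */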
3837
3838         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3839                     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3840
3841         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3842                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3843         if (ibqp->srq) {
3844                 roce_set_field(context->byte_76_srqn_op_en,
3845                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3846                                to_hr_srq(ibqp->srq)->srqn);
3847                 roce_set_bit(context->byte_76_srqn_op_en,
3848                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3849         }
3850
3851         roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3852
3853         hr_qp->access_flags = attr->qp_access_flags;
3854         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3855                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3856 }
3857
3858 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3859                                    const struct ib_qp_attr *attr, int attr_mask,
3860                                    struct hns_roce_v2_qp_context *context,
3861                                    struct hns_roce_v2_qp_context *qpc_mask)
3862 {
3863         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3864
3865         /*
3866          * In the v2 engine, software passes a context and a context mask to
3867          * hardware when modifying a QP. For every field software needs to
3868          * modify, all bits of that field in the context mask must be cleared
3869          * to 0 at the same time; all other mask bits remain 0x1.
3870          */
3871         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3872                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3873         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3874                        V2_QPC_BYTE_4_TST_S, 0);
3875
3876         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3877                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3878                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3879                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3880                              0);
3881
3882                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3883                              !!(attr->qp_access_flags &
3884                              IB_ACCESS_REMOTE_WRITE));
3885                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3886                              0);
3887
3888                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3889                              !!(attr->qp_access_flags &
3890                              IB_ACCESS_REMOTE_ATOMIC));
3891                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3892                              0);
3893                 roce_set_bit(context->byte_76_srqn_op_en,
3894                              V2_QPC_BYTE_76_EXT_ATE_S,
3895                              !!(attr->qp_access_flags &
3896                                 IB_ACCESS_REMOTE_ATOMIC));
3897                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3898                              V2_QPC_BYTE_76_EXT_ATE_S, 0);
3899         } else {
3900                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3901                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3902                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3903                              0);
3904
3905                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3906                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3907                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3908                              0);
3909
3910                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3911                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3912                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3913                              0);
3914                 roce_set_bit(context->byte_76_srqn_op_en,
3915                              V2_QPC_BYTE_76_EXT_ATE_S,
3916                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3917                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3918                              V2_QPC_BYTE_76_EXT_ATE_S, 0);
3919         }
3920
3921         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3922                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3923         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3924                        V2_QPC_BYTE_16_PD_S, 0);
3925
3926         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3927                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3928         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3929                        V2_QPC_BYTE_80_RX_CQN_S, 0);
3930
3931         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3932                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3933         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3934                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3935
3936         if (ibqp->srq) {
3937                 roce_set_bit(context->byte_76_srqn_op_en,
3938                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3939                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3940                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
3941                 roce_set_field(context->byte_76_srqn_op_en,
3942                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3943                                to_hr_srq(ibqp->srq)->srqn);
3944                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3945                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3946         }
3947
3948         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3949                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3950         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3951                        V2_QPC_BYTE_4_SQPN_S, 0);
3952
3953         if (attr_mask & IB_QP_DEST_QPN) {
3954                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3955                                V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3956                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3957                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3958         }
3959 }
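
/*
 * Sketch only (hypothetical helper): the two branches above differ solely
 * in whether the access flags come from the caller's attr or from the
 * flags cached at create time, so the source could be selected once and
 * the five context/mask pairs written in a single path:
 */
static inline u32 qp_init_access_flags(struct hns_roce_qp *hr_qp,
                                       const struct ib_qp_attr *attr,
                                       int attr_mask)
{
        return (attr_mask & IB_QP_ACCESS_FLAGS) ? attr->qp_access_flags :
                                                  hr_qp->access_flags;
}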
3960
3961 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
3962                             struct hns_roce_qp *hr_qp,
3963                             struct hns_roce_v2_qp_context *context,
3964                             struct hns_roce_v2_qp_context *qpc_mask)
3965 {
3966         u64 mtts[MTT_MIN_COUNT] = { 0 };
3967         u64 wqe_sge_ba;
3968         int count;
3969
3970         /* Search for the QP buffer's MTTs */
3971         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
3972                                   MTT_MIN_COUNT, &wqe_sge_ba);
3973         if (hr_qp->rq.wqe_cnt && count < 1) {
3974                 ibdev_err(&hr_dev->ib_dev,
3975                           "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
3976                 return -EINVAL;
3977         }
3978
3979         context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3980         qpc_mask->wqe_sge_ba = 0;
3981
3982         /*
3983          * In the v2 engine, software passes a context and a context mask to
3984          * hardware when modifying a QP. For every field software needs to
3985          * modify, all bits of that field in the context mask must be cleared
3986          * to 0 at the same time; all other mask bits remain 0x1.
3987          */
3988         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3989                        V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3990         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3991                        V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3992
3993         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3994                        V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3995                        to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
3996                                         hr_qp->sq.wqe_cnt));
3997         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3998                        V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3999
4000         roce_set_field(context->byte_20_smac_sgid_idx,
4001                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4002                        V2_QPC_BYTE_20_SGE_HOP_NUM_S,
4003                        to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4004                                         hr_qp->sge.sge_cnt));
4005         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4006                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4007                        V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
4008
4009         roce_set_field(context->byte_20_smac_sgid_idx,
4010                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4011                        V2_QPC_BYTE_20_RQ_HOP_NUM_S,
4012                        to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4013                                         hr_qp->rq.wqe_cnt));
4014
4015         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4016                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4017                        V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
4018
4019         roce_set_field(context->byte_16_buf_ba_pg_sz,
4020                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4021                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
4022                        to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4023         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4024                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4025                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
4026
4027         roce_set_field(context->byte_16_buf_ba_pg_sz,
4028                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4029                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
4030                        to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4031         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4032                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4033                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
4034
4035         context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4036         qpc_mask->rq_cur_blk_addr = 0;
4037
4038         roce_set_field(context->byte_92_srq_info,
4039                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4040                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
4041                        upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4042         roce_set_field(qpc_mask->byte_92_srq_info,
4043                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4044                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
4045
4046         context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4047         qpc_mask->rq_nxt_blk_addr = 0;
4048
4049         roce_set_field(context->byte_104_rq_sge,
4050                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4051                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
4052                        upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4053         roce_set_field(qpc_mask->byte_104_rq_sge,
4054                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4055                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
4056
4057         roce_set_field(context->byte_84_rq_ci_pi,
4058                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4059                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
4060         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4061                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4062                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4063
4064         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4065                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
4066                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
4067
4068         return 0;
4069 }
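
/*
 * Worked example for the split above (address assumed): the WQE/SGE base
 * address is 8-byte aligned, so its low 3 bits are dropped before it is
 * spread across the QPC. For wqe_sge_ba = 0x123456780:
 *
 *      wqe_sge_ba >> 3        = 0x2468acf0  -> context->wqe_sge_ba
 *      wqe_sge_ba >> (32 + 3) = 0x0         -> V2_QPC_BYTE_12_WQE_SGE_BA
 */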
4070
4071 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4072                             struct hns_roce_qp *hr_qp,
4073                             struct hns_roce_v2_qp_context *context,
4074                             struct hns_roce_v2_qp_context *qpc_mask)
4075 {
4076         struct ib_device *ibdev = &hr_dev->ib_dev;
4077         u64 sge_cur_blk = 0;
4078         u64 sq_cur_blk = 0;
4079         int count;
4080
4081         /* Search for the QP buffer's MTTs */
4082         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4083         if (count < 1) {
4084                 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4085                           hr_qp->qpn);
4086                 return -EINVAL;
4087         }
4088         if (hr_qp->sge.sge_cnt > 0) {
4089                 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4090                                           hr_qp->sge.offset,
4091                                           &sge_cur_blk, 1, NULL);
4092                 if (count < 1) {
4093                         ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4094                                   hr_qp->qpn);
4095                         return -EINVAL;
4096                 }
4097         }
4098
4099         /*
4100          * In the v2 engine, software passes a context and a context mask to
4101          * hardware when modifying a QP. For every field software needs to
4102          * modify, all bits of that field in the context mask must be cleared
4103          * to 0 at the same time; all other mask bits remain 0x1.
4104          */
4105         context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4106         roce_set_field(context->byte_168_irrl_idx,
4107                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4108                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4109                        upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4110         qpc_mask->sq_cur_blk_addr = 0;
4111         roce_set_field(qpc_mask->byte_168_irrl_idx,
4112                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4113                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
4114
4115         context->sq_cur_sge_blk_addr =
4116                 cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
4117         roce_set_field(context->byte_184_irrl_idx,
4118                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4119                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4120                        upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4121         qpc_mask->sq_cur_sge_blk_addr = 0;
4122         roce_set_field(qpc_mask->byte_184_irrl_idx,
4123                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4124                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4125
4126         context->rx_sq_cur_blk_addr =
4127                 cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4128         roce_set_field(context->byte_232_irrl_sge,
4129                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4130                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4131                        upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4132         qpc_mask->rx_sq_cur_blk_addr = 0;
4133         roce_set_field(qpc_mask->byte_232_irrl_sge,
4134                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4135                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4136
4137         return 0;
4138 }
4139
4140 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4141                                   const struct ib_qp_attr *attr)
4142 {
4143         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4144                 return IB_MTU_4096;
4145
4146         return attr->path_mtu;
4147 }
4148
4149 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4150                                  const struct ib_qp_attr *attr, int attr_mask,
4151                                  struct hns_roce_v2_qp_context *context,
4152                                  struct hns_roce_v2_qp_context *qpc_mask)
4153 {
4154         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4155         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4156         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4157         struct ib_device *ibdev = &hr_dev->ib_dev;
4158         dma_addr_t trrl_ba;
4159         dma_addr_t irrl_ba;
4160         enum ib_mtu mtu;
4161         u8 lp_pktn_ini;
4162         u8 port_num;
4163         u64 *mtts;
4164         u8 *dmac;
4165         u8 *smac;
4166         int port;
4167         int ret;
4168
4169         ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4170         if (ret) {
4171                 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4172                 return ret;
4173         }
4174
4175         /* Search for the IRRL's MTTs */
4176         mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4177                                    hr_qp->qpn, &irrl_ba);
4178         if (!mtts) {
4179                 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4180                 return -EINVAL;
4181         }
4182
4183         /* Search for the TRRL's MTTs */
4184         mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4185                                    hr_qp->qpn, &trrl_ba);
4186         if (!mtts) {
4187                 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4188                 return -EINVAL;
4189         }
4190
4191         if (attr_mask & IB_QP_ALT_PATH) {
4192                 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4193                           attr_mask);
4194                 return -EINVAL;
4195         }
4196
4197         roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4198                        V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
4199         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4200                        V2_QPC_BYTE_132_TRRL_BA_S, 0);
4201         context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4202         qpc_mask->trrl_ba = 0;
4203         roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4204                        V2_QPC_BYTE_140_TRRL_BA_S,
4205                        (u32)(trrl_ba >> (32 + 16 + 4)));
4206         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4207                        V2_QPC_BYTE_140_TRRL_BA_S, 0);
4208
4209         context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4210         qpc_mask->irrl_ba = 0;
4211         roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4212                        V2_QPC_BYTE_208_IRRL_BA_S,
4213                        irrl_ba >> (32 + 6));
4214         roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4215                        V2_QPC_BYTE_208_IRRL_BA_S, 0);
4216
4217         roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
4218         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
4219
4220         roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4221                      hr_qp->sq_signal_bits);
4222         roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4223                      0);
4224
4225         port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4226
4227         smac = (u8 *)hr_dev->dev_addr[port];
4228         dmac = (u8 *)attr->ah_attr.roce.dmac;
4229         /* When dmac equals smac or loop_idc is 1, loop the traffic back */
4230         if (ether_addr_equal_unaligned(dmac, smac) ||
4231             hr_dev->loop_idc == 0x1) {
4232                 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
4233                 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
4234         }
4235
4236         if (attr_mask & IB_QP_DEST_QPN) {
4237                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4238                                V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
4239                 roce_set_field(qpc_mask->byte_56_dqpn_err,
4240                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4241         }
4242
4243         /* Configure GID index */
4244         port_num = rdma_ah_get_port_num(&attr->ah_attr);
4245         roce_set_field(context->byte_20_smac_sgid_idx,
4246                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4247                        hns_get_gid_index(hr_dev, port_num - 1,
4248                                          grh->sgid_index));
4249         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4250                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4251
4252         memcpy(&(context->dmac), dmac, sizeof(u32));
4253         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4254                        V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
4255         qpc_mask->dmac = 0;
4256         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4257                        V2_QPC_BYTE_52_DMAC_S, 0);
4258
4259         mtu = get_mtu(ibqp, attr);
4260         hr_qp->path_mtu = mtu;
4261
4262         if (attr_mask & IB_QP_PATH_MTU) {
4263                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4264                                V2_QPC_BYTE_24_MTU_S, mtu);
4265                 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4266                                V2_QPC_BYTE_24_MTU_S, 0);
4267         }
4268
4269 #define MAX_LP_MSG_LEN 65536
4270         /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
4271         lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
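        /*
         * Worked example (assumed MTU): with path_mtu = IB_MTU_1024,
         * ib_mtu_enum_to_int() returns 1024, so lp_pktn_ini =
         * ilog2(65536 / 1024) = 6, i.e. at most 2^6 packets of 1024 bytes
         * (exactly 64KB) per loopback message.
         */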
4272
4273         roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4274                        V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
4275         roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4276                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
4277
4278         /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4279         roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4280                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
4281         roce_set_field(qpc_mask->byte_172_sq_psn,
4282                        V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4283                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
4284
4285         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4286                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
4287         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
4288                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
4289         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4290                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
4291                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
4292
4293         context->rq_rnr_timer = 0;
4294         qpc_mask->rq_rnr_timer = 0;
4295
4296         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
4297                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
4298         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
4299                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
4300
4301         /* RoCEE sends 2^lp_sgen_ini segments each time */
4302         roce_set_field(context->byte_168_irrl_idx,
4303                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
4304                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
4305         roce_set_field(qpc_mask->byte_168_irrl_idx,
4306                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
4307                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
4308
4309         return 0;
4310 }
4311
4312 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4313                                 const struct ib_qp_attr *attr, int attr_mask,
4314                                 struct hns_roce_v2_qp_context *context,
4315                                 struct hns_roce_v2_qp_context *qpc_mask)
4316 {
4317         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4318         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4319         struct ib_device *ibdev = &hr_dev->ib_dev;
4320         int ret;
4321
4322         /* Alternate path and path migration are not supported */
4323         if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4324                 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
4325                 return -EINVAL;
4326         }
4327
4328         ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4329         if (ret) {
4330                 ibdev_err(ibdev, "failed to config sq buf, ret = %d\n", ret);
4331                 return ret;
4332         }
4333
4334         /*
4335          * Set some fields in context to zero. Because the default value of
4336          * every field in context is already zero, we need not set them again,
4337          * but we must clear the relevant fields in the context mask.
4338          */
4339         roce_set_field(qpc_mask->byte_232_irrl_sge,
4340                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4341                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4342
4343         roce_set_field(qpc_mask->byte_240_irrl_tail,
4344                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
4345                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4346
4347         roce_set_field(qpc_mask->byte_248_ack_psn,
4348                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4349                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4350         roce_set_bit(qpc_mask->byte_248_ack_psn,
4351                      V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4352         roce_set_field(qpc_mask->byte_248_ack_psn,
4353                        V2_QPC_BYTE_248_IRRL_PSN_M,
4354                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4355
4356         roce_set_field(qpc_mask->byte_240_irrl_tail,
4357                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4358                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4359
4360         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4361                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4362                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4363
4364         roce_set_bit(qpc_mask->byte_248_ack_psn,
4365                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4366
4367         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4368                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4369
4370         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4371                        V2_QPC_BYTE_212_LSN_S, 0x100);
4372         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4373                        V2_QPC_BYTE_212_LSN_S, 0);
4374
4375         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4376                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4377
4378         return 0;
4379 }
4380
4381 static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4382 {
4383         if (!fl)
4384                 fl = rdma_calc_flow_label(lqpn, rqpn);
4385
4386         return rdma_flow_label_to_udp_sport(fl);
4387 }
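
/*
 * Usage sketch (hypothetical values): when the caller supplies no flow
 * label, the label is derived from the two QPNs, so both sides of a
 * connection compute the same UDP source port:
 *
 *      u16 sport = get_udp_sport(0, 0x8, 0x9);
 *
 * A non-zero fl from userspace is honoured as-is.
 */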
4388
4389 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4390                                 const struct ib_qp_attr *attr,
4391                                 int attr_mask,
4392                                 struct hns_roce_v2_qp_context *context,
4393                                 struct hns_roce_v2_qp_context *qpc_mask)
4394 {
4395         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4396         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4397         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4398         struct ib_device *ibdev = &hr_dev->ib_dev;
4399         const struct ib_gid_attr *gid_attr = NULL;
4400         int is_roce_protocol;
4401         u16 vlan_id = 0xffff;
4402         bool is_udp = false;
4403         u8 ib_port;
4404         u8 hr_port;
4405         int ret;
4406
4407         ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4408         hr_port = ib_port - 1;
4409         is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4410                            rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4411
4412         if (is_roce_protocol) {
4413                 gid_attr = attr->ah_attr.grh.sgid_attr;
4414                 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4415                 if (ret)
4416                         return ret;
4417
4418                 if (gid_attr)
4419                         is_udp = (gid_attr->gid_type ==
4420                                  IB_GID_TYPE_ROCE_UDP_ENCAP);
4421         }
4422
4423         if (vlan_id < VLAN_N_VID) {
4424                 roce_set_bit(context->byte_76_srqn_op_en,
4425                              V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4426                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4427                              V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4428                 roce_set_bit(context->byte_168_irrl_idx,
4429                              V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4430                 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4431                              V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4432         }
4433
4434         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4435                        V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4436         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4437                        V2_QPC_BYTE_24_VLAN_ID_S, 0);
4438
4439         if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4440                 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4441                           grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4442                 return -EINVAL;
4443         }
4444
4445         if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4446                 ibdev_err(ibdev, "ah attr is not RDMA RoCE type\n");
4447                 return -EINVAL;
4448         }
4449
4450         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4451                        V2_QPC_BYTE_52_UDPSPN_S,
4452                        is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
4453                                               attr->dest_qp_num) : 0);
4454
4455         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4456                        V2_QPC_BYTE_52_UDPSPN_S, 0);
4457
4458         roce_set_field(context->byte_20_smac_sgid_idx,
4459                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4460                        grh->sgid_index);
4461
4462         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4463                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4464
4465         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4466                        V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4467         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4468                        V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4469
4470         if (is_udp)
4471                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4472                                V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4473         else
4474                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4475                                V2_QPC_BYTE_24_TC_S, grh->traffic_class);
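        /*
         * In the UDP (RoCEv2) branch above, the upper 6 bits of the 8-bit
         * traffic class carry the DSCP, hence the >> 2; e.g. a traffic
         * class of 0xb8 maps to DSCP 46 (EF).
         */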
4476
4477         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4478                        V2_QPC_BYTE_24_TC_S, 0);
4479         roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4480                        V2_QPC_BYTE_28_FL_S, grh->flow_label);
4481         roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4482                        V2_QPC_BYTE_28_FL_S, 0);
4483         memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4484         memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4485
4486         hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4487         if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4488                 ibdev_err(ibdev,
4489                           "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
4490                           hr_qp->sl, MAX_SERVICE_LEVEL);
4491                 return -EINVAL;
4492         }
4493
4494         roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4495                        V2_QPC_BYTE_28_SL_S, hr_qp->sl);
4496         roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4497                        V2_QPC_BYTE_28_SL_S, 0);
4498
4499         return 0;
4500 }
4501
4502 static bool check_qp_state(enum ib_qp_state cur_state,
4503                            enum ib_qp_state new_state)
4504 {
4505         static const bool sm[][IB_QPS_ERR + 1] = {
4506                 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4507                                    [IB_QPS_INIT] = true },
4508                 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4509                                   [IB_QPS_INIT] = true,
4510                                   [IB_QPS_RTR] = true,
4511                                   [IB_QPS_ERR] = true },
4512                 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4513                                  [IB_QPS_RTS] = true,
4514                                  [IB_QPS_ERR] = true },
4515                 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4516                                  [IB_QPS_RTS] = true,
4517                                  [IB_QPS_ERR] = true },
4518                 [IB_QPS_SQD] = {},
4519                 [IB_QPS_SQE] = {},
4520                 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
4521         };
4522
4523         return sm[cur_state][new_state];
4524 }
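
/*
 * Reading the table (values follow directly from sm[][] above):
 * check_qp_state(IB_QPS_INIT, IB_QPS_RTR) is true, while any transition
 * out of SQD or SQE, e.g. check_qp_state(IB_QPS_SQD, IB_QPS_RTS), is
 * false because those rows are empty.
 */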
4525
4526 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4527                                       const struct ib_qp_attr *attr,
4528                                       int attr_mask,
4529                                       enum ib_qp_state cur_state,
4530                                       enum ib_qp_state new_state,
4531                                       struct hns_roce_v2_qp_context *context,
4532                                       struct hns_roce_v2_qp_context *qpc_mask)
4533 {
4534         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4535         int ret = 0;
4536
4537         if (!check_qp_state(cur_state, new_state)) {
4538                 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4539                 return -EINVAL;
4540         }
4541
4542         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4543                 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4544                 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4545                                         qpc_mask);
4546         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4547                 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4548                                        qpc_mask);
4549         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4550                 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4551                                             qpc_mask);
4552         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4553                 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4554                                            qpc_mask);
4555         }
4556
4557         return ret;
4558 }
4559
4560 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4561                                       const struct ib_qp_attr *attr,
4562                                       int attr_mask,
4563                                       struct hns_roce_v2_qp_context *context,
4564                                       struct hns_roce_v2_qp_context *qpc_mask)
4565 {
4566         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4567         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4568         int ret = 0;
4569
4570         if (attr_mask & IB_QP_AV) {
4571                 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4572                                            qpc_mask);
4573                 if (ret)
4574                         return ret;
4575         }
4576
4577         if (attr_mask & IB_QP_TIMEOUT) {
4578                 if (attr->timeout < 31) {
4579                         roce_set_field(context->byte_28_at_fl,
4580                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4581                                        attr->timeout);
4582                         roce_set_field(qpc_mask->byte_28_at_fl,
4583                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4584                                        0);
4585                 } else {
4586                         ibdev_warn(&hr_dev->ib_dev,
4587                                    "Local ACK timeout shall be 0 to 30.\n");
4588                 }
4589         }
4590
4591         if (attr_mask & IB_QP_RETRY_CNT) {
4592                 roce_set_field(context->byte_212_lsn,
4593                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4594                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4595                                attr->retry_cnt);
4596                 roce_set_field(qpc_mask->byte_212_lsn,
4597                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4598                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4599
4600                 roce_set_field(context->byte_212_lsn,
4601                                V2_QPC_BYTE_212_RETRY_CNT_M,
4602                                V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
4603                 roce_set_field(qpc_mask->byte_212_lsn,
4604                                V2_QPC_BYTE_212_RETRY_CNT_M,
4605                                V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4606         }
4607
4608         if (attr_mask & IB_QP_RNR_RETRY) {
4609                 roce_set_field(context->byte_244_rnr_rxack,
4610                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4611                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4612                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4613                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4614                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4615
4616                 roce_set_field(context->byte_244_rnr_rxack,
4617                                V2_QPC_BYTE_244_RNR_CNT_M,
4618                                V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4619                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4620                                V2_QPC_BYTE_244_RNR_CNT_M,
4621                                V2_QPC_BYTE_244_RNR_CNT_S, 0);
4622         }
4623
4624         /* Attributes required by RC, UC and UD */
4625         if (attr_mask & IB_QP_SQ_PSN) {
4626                 roce_set_field(context->byte_172_sq_psn,
4627                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4628                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4629                 roce_set_field(qpc_mask->byte_172_sq_psn,
4630                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4631                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4632
4633                 roce_set_field(context->byte_196_sq_psn,
4634                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4635                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4636                 roce_set_field(qpc_mask->byte_196_sq_psn,
4637                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4638                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4639
4640                 roce_set_field(context->byte_220_retry_psn_msn,
4641                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4642                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4643                 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4644                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4645                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4646
4647                 roce_set_field(context->byte_224_retry_msg,
4648                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4649                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4650                                attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4651                 roce_set_field(qpc_mask->byte_224_retry_msg,
4652                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4653                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4654
4655                 roce_set_field(context->byte_224_retry_msg,
4656                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4657                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4658                                attr->sq_psn);
4659                 roce_set_field(qpc_mask->byte_224_retry_msg,
4660                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4661                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4662
4663                 roce_set_field(context->byte_244_rnr_rxack,
4664                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4665                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4666                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4667                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4668                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4669         }
4670
4671         if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4672              attr->max_dest_rd_atomic) {
4673                 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4674                                V2_QPC_BYTE_140_RR_MAX_S,
4675                                fls(attr->max_dest_rd_atomic - 1));
4676                 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4677                                V2_QPC_BYTE_140_RR_MAX_S, 0);
4678         }
4679
4680         if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4681                 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4682                                V2_QPC_BYTE_208_SR_MAX_S,
4683                                fls(attr->max_rd_atomic - 1));
4684                 roce_set_field(qpc_mask->byte_208_irrl,
4685                                V2_QPC_BYTE_208_SR_MAX_M,
4686                                V2_QPC_BYTE_208_SR_MAX_S, 0);
4687         }
4688
4689         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4690                 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4691
4692         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4693                 roce_set_field(context->byte_80_rnr_rx_cqn,
4694                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4695                                V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4696                                attr->min_rnr_timer);
4697                 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4698                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4699                                V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4700         }
4701
4702         /* Attributes required by RC and UC */
4703         if (attr_mask & IB_QP_RQ_PSN) {
4704                 roce_set_field(context->byte_108_rx_reqepsn,
4705                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4706                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4707                 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4708                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4709                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4710
4711                 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4712                                V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4713                 roce_set_field(qpc_mask->byte_152_raq,
4714                                V2_QPC_BYTE_152_RAQ_PSN_M,
4715                                V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4716         }
4717
4718         if (attr_mask & IB_QP_QKEY) {
4719                 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4720                 qpc_mask->qkey_xrcd = 0;
4721                 hr_qp->qkey = attr->qkey;
4722         }
4723
4724         return ret;
4725 }
4726
4727 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4728                                           const struct ib_qp_attr *attr,
4729                                           int attr_mask)
4730 {
4731         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4732         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4733
4734         if (attr_mask & IB_QP_ACCESS_FLAGS)
4735                 hr_qp->atomic_rd_en = attr->qp_access_flags;
4736
4737         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4738                 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4739         if (attr_mask & IB_QP_PORT) {
4740                 hr_qp->port = attr->port_num - 1;
4741                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4742         }
4743 }
4744
4745 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4746                                  const struct ib_qp_attr *attr,
4747                                  int attr_mask, enum ib_qp_state cur_state,
4748                                  enum ib_qp_state new_state)
4749 {
4750         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4751         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4752         struct hns_roce_v2_qp_context ctx[2];
4753         struct hns_roce_v2_qp_context *context = ctx;
4754         struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4755         struct ib_device *ibdev = &hr_dev->ib_dev;
4756         unsigned long sq_flag = 0;
4757         unsigned long rq_flag = 0;
4758         int ret;
4759
4760         /*
4761          * In the v2 engine, software passes a context and a context mask to
4762          * hardware when modifying a QP. For every field software needs to
4763          * modify, all bits of that field in the context mask must be cleared
4764          * to 0 at the same time; all other mask bits remain 0x1.
4765          */
4766         memset(context, 0, hr_dev->caps.qpc_sz);
4767         memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
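        /*
         * At this point every mask bit is 1 ("leave unchanged"); each
         * helper below clears the mask bits of exactly the fields it
         * programs.
         */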
4768
4769         ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4770                                          new_state, context, qpc_mask);
4771         if (ret)
4772                 goto out;
4773
4774         /* When the QP state becomes ERR, the SQ and RQ WQEs should be flushed */
4775         if (new_state == IB_QPS_ERR) {
4776                 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4777                 hr_qp->state = IB_QPS_ERR;
4778                 roce_set_field(context->byte_160_sq_ci_pi,
4779                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4780                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4781                                hr_qp->sq.head);
4782                 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4783                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4784                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4785                 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4786
4787                 if (!ibqp->srq) {
4788                         spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4789                         roce_set_field(context->byte_84_rq_ci_pi,
4790                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4791                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4792                                hr_qp->rq.head);
4793                         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4794                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4795                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4796                         spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
4797                 }
4798         }
4799
4800         /* Configure the optional fields */
4801         ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4802                                          qpc_mask);
4803         if (ret)
4804                 goto out;
4805
4806         roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4807                      ibqp->srq ? 1 : 0);
4808         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4809                      V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4810
4811         /* Every state transition must update the QP state field */
4812         roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4813                        V2_QPC_BYTE_60_QP_ST_S, new_state);
4814         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4815                        V2_QPC_BYTE_60_QP_ST_S, 0);
4816
4817         /* Software passes the context to hardware */
4818         ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
4819         if (ret) {
4820                 ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
4821                 goto out;
4822         }
4823
4824         hr_qp->state = new_state;
4825
4826         hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4827
4828         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4829                 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4830                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4831                 if (ibqp->send_cq != ibqp->recv_cq)
4832                         hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4833                                              hr_qp->qpn, NULL);
4834
4835                 hr_qp->rq.head = 0;
4836                 hr_qp->rq.tail = 0;
4837                 hr_qp->sq.head = 0;
4838                 hr_qp->sq.tail = 0;
4839                 hr_qp->next_sge = 0;
4840                 if (hr_qp->rq.wqe_cnt)
4841                         *hr_qp->rdb.db_record = 0;
4842         }
4843
4844 out:
4845         return ret;
4846 }
4847
4848 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
4849 {
4850         static const enum ib_qp_state map[] = {
4851                 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
4852                 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
4853                 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
4854                 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
4855                 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
4856                 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
4857                 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
4858                 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
4859         };
4860
4861         return (state < ARRAY_SIZE(map)) ? map[state] : -1;
4862 }
4863
4864 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4865                                  struct hns_roce_qp *hr_qp,
4866                                  struct hns_roce_v2_qp_context *hr_context)
4867 {
4868         struct hns_roce_cmd_mailbox *mailbox;
4869         int ret;
4870
4871         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4872         if (IS_ERR(mailbox))
4873                 return PTR_ERR(mailbox);
4874
4875         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4876                                 HNS_ROCE_CMD_QUERY_QPC,
4877                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
4878         if (ret)
4879                 goto out;
4880
4881         memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
4882
4883 out:
4884         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4885         return ret;
4886 }
4887
4888 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4889                                 int qp_attr_mask,
4890                                 struct ib_qp_init_attr *qp_init_attr)
4891 {
4892         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4893         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4894         struct hns_roce_v2_qp_context context = {};
4895         struct ib_device *ibdev = &hr_dev->ib_dev;
4896         int tmp_qp_state;
4897         int state;
4898         int ret;
4899
4900         memset(qp_attr, 0, sizeof(*qp_attr));
4901         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4902
4903         mutex_lock(&hr_qp->mutex);
4904
4905         if (hr_qp->state == IB_QPS_RESET) {
4906                 qp_attr->qp_state = IB_QPS_RESET;
4907                 ret = 0;
4908                 goto done;
4909         }
4910
4911         ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4912         if (ret) {
4913                 ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
4914                 ret = -EINVAL;
4915                 goto out;
4916         }
4917
4918         state = roce_get_field(context.byte_60_qpst_tempid,
4919                                V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4920         tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4921         if (tmp_qp_state == -1) {
4922                 ibdev_err(ibdev, "Illegal ib_qp_state\n");
4923                 ret = -EINVAL;
4924                 goto out;
4925         }
4926         hr_qp->state = (u8)tmp_qp_state;
4927         qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4928         qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4929                                                         V2_QPC_BYTE_24_MTU_M,
4930                                                         V2_QPC_BYTE_24_MTU_S);
4931         qp_attr->path_mig_state = IB_MIG_ARMED;
4932         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
4933         if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4934                 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
4935
4936         qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4937                                          V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4938                                          V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4939         qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4940                                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4941                                               V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4942         qp_attr->dest_qp_num = roce_get_field(context.byte_56_dqpn_err,
4943                                               V2_QPC_BYTE_56_DQPN_M,
4944                                               V2_QPC_BYTE_56_DQPN_S);
4945         qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4946                                     V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4947                                     ((roce_get_bit(context.byte_76_srqn_op_en,
4948                                     V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4949                                     ((roce_get_bit(context.byte_76_srqn_op_en,
4950                                     V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4951
4952         if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4953             hr_qp->ibqp.qp_type == IB_QPT_UC) {
4954                 struct ib_global_route *grh =
4955                                 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4956
4957                 rdma_ah_set_sl(&qp_attr->ah_attr,
4958                                roce_get_field(context.byte_28_at_fl,
4959                                               V2_QPC_BYTE_28_SL_M,
4960                                               V2_QPC_BYTE_28_SL_S));
4961                 grh->flow_label = roce_get_field(context.byte_28_at_fl,
4962                                                  V2_QPC_BYTE_28_FL_M,
4963                                                  V2_QPC_BYTE_28_FL_S);
4964                 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4965                                                  V2_QPC_BYTE_20_SGID_IDX_M,
4966                                                  V2_QPC_BYTE_20_SGID_IDX_S);
4967                 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4968                                                 V2_QPC_BYTE_24_HOP_LIMIT_M,
4969                                                 V2_QPC_BYTE_24_HOP_LIMIT_S);
4970                 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4971                                                     V2_QPC_BYTE_24_TC_M,
4972                                                     V2_QPC_BYTE_24_TC_S);
4973
4974                 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4975         }
4976
4977         qp_attr->port_num = hr_qp->port + 1;
4978         qp_attr->sq_draining = 0;
4979         qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4980                                                      V2_QPC_BYTE_208_SR_MAX_M,
4981                                                      V2_QPC_BYTE_208_SR_MAX_S);
4982         qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4983                                                      V2_QPC_BYTE_140_RR_MAX_M,
4984                                                      V2_QPC_BYTE_140_RR_MAX_S);
4985         qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4986                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4987                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4988         qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4989                                               V2_QPC_BYTE_28_AT_M,
4990                                               V2_QPC_BYTE_28_AT_S);
4991         qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4992                                             V2_QPC_BYTE_212_RETRY_CNT_M,
4993                                             V2_QPC_BYTE_212_RETRY_CNT_S);
4994         qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
4995                                             V2_QPC_BYTE_244_RNR_CNT_M,
4996                                             V2_QPC_BYTE_244_RNR_CNT_S);
4997
4998 done:
4999         qp_attr->cur_qp_state = qp_attr->qp_state;
5000         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5001         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
5002
5003         if (!ibqp->uobject) {
5004                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5005                 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5006         } else {
5007                 qp_attr->cap.max_send_wr = 0;
5008                 qp_attr->cap.max_send_sge = 0;
5009         }
5010
5011         qp_init_attr->cap = qp_attr->cap;
5012         qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5013
5014 out:
5015         mutex_unlock(&hr_qp->mutex);
5016         return ret;
5017 }
5018
5019 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5020                                          struct hns_roce_qp *hr_qp,
5021                                          struct ib_udata *udata)
5022 {
5023         struct ib_device *ibdev = &hr_dev->ib_dev;
5024         struct hns_roce_cq *send_cq, *recv_cq;
5025         unsigned long flags;
5026         int ret = 0;
5027
5028         if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
5029                 /* Modify qp to reset before destroying qp */
5030                 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5031                                             hr_qp->state, IB_QPS_RESET);
5032                 if (ret)
5033                         ibdev_err(ibdev,
5034                                   "failed to modify QP to RST, ret = %d\n",
5035                                   ret);
5036         }
5037
5038         send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5039         recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5040
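             /*
              * Hold the device's QP list lock and both CQ locks so that
              * CQE cleanup and QP removal appear as a single step to
              * anyone polling the CQs.
              */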
5041         spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5042         hns_roce_lock_cqs(send_cq, recv_cq);
5043
5044         if (!udata) {
5045                 if (recv_cq)
5046                         __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5047                                                (hr_qp->ibqp.srq ?
5048                                                 to_hr_srq(hr_qp->ibqp.srq) :
5049                                                 NULL));
5050
5051                 if (send_cq && send_cq != recv_cq)
5052                         __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5053
5055
5056         hns_roce_qp_remove(hr_dev, hr_qp);
5057
5058         hns_roce_unlock_cqs(send_cq, recv_cq);
5059         spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5060
5061         return ret;
5062 }
5063
5064 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5065 {
5066         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5067         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5068         int ret;
5069
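             /*
              * A failure in the common teardown is only logged; the QP
              * resources are still released below and success is
              * returned to the verbs layer.
              */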
5070         ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5071         if (ret)
5072                 ibdev_err(&hr_dev->ib_dev,
5073                           "failed to destroy QP 0x%06lx, ret = %d\n",
5074                           hr_qp->qpn, ret);
5075
5076         hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5077
5078         return 0;
5079 }
5080
5081 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5082                                             struct hns_roce_qp *hr_qp)
5083 {
5084         struct ib_device *ibdev = &hr_dev->ib_dev;
5085         struct hns_roce_sccc_clr_done *resp;
5086         struct hns_roce_sccc_clr *clr;
5087         struct hns_roce_cmq_desc desc;
5088         int ret, i;
5089
5090         mutex_lock(&hr_dev->qp_table.scc_mutex);
5091
5092         /* set scc ctx clear done flag */
5093         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5094         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5095         if (ret) {
5096                 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
5097                 goto out;
5098         }
5099
5100         /* clear scc context */
5101         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5102         clr = (struct hns_roce_sccc_clr *)desc.data;
5103         clr->qpn = cpu_to_le32(hr_qp->qpn);
5104         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5105         if (ret) {
5106                 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
5107                 goto out;
5108         }
5109
5110         /* query scc context clear is done or not */
5111         resp = (struct hns_roce_sccc_clr_done *)desc.data;
5112         for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5113                 hns_roce_cmq_setup_basic_desc(&desc,
5114                                               HNS_ROCE_OPC_QUERY_SCCC, true);
5115                 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5116                 if (ret) {
5117                         ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5118                                   ret);
5119                         goto out;
5120                 }
5121
5122                 if (resp->clr_done)
5123                         goto out;
5124
5125                 msleep(20);
5126         }
5127
5128         ibdev_err(ibdev, "Query SCC clr done flag timed out.\n");
5129         ret = -ETIMEDOUT;
5130
5131 out:
5132         mutex_unlock(&hr_dev->qp_table.scc_mutex);
5133         return ret;
5134 }
5135
5136 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5137                                    struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5138                                    u32 cqn, void *mb_buf, u64 *mtts_wqe,
5139                                    u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5140                                    dma_addr_t dma_handle_idx)
5141 {
5142         struct hns_roce_srq_context *srq_context;
5143
5144         srq_context = mb_buf;
5145         memset(srq_context, 0, sizeof(*srq_context));
5146
5147         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5148                        SRQC_BYTE_4_SRQ_ST_S, 1);
5149
5150         roce_set_field(srq_context->byte_4_srqn_srqst,
5151                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5152                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5153                        to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5154                                         srq->wqe_cnt));
5155         roce_set_field(srq_context->byte_4_srqn_srqst,
5156                        SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5157                        ilog2(srq->wqe_cnt));
5158
5159         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5160                        SRQC_BYTE_4_SRQN_S, srq->srqn);
5161
5162         roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5163                        SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5164
5165         roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5166                        SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5167
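             /*
              * Base-address table addresses are 8-byte aligned, so the
              * low three bits are dropped: the 32-bit field below holds
              * address bits [34:3] and the next field bits [63:35].
              */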
5168         srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5169
5170         roce_set_field(srq_context->byte_24_wqe_bt_ba,
5171                        SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5172                        SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5173                        dma_handle_wqe >> 35);
5174
5175         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5176                        SRQC_BYTE_28_PD_S, pdn);
5177         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5178                        SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5179                        fls(srq->max_gs - 1));
5180
5181         srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
5182         roce_set_field(srq_context->rsv_idx_bt_ba,
5183                        SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5184                        SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5185                        dma_handle_idx >> 35);
5186
5187         srq_context->idx_cur_blk_addr =
5188                 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
5189         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5190                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5191                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5192                        upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5193         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5194                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5195                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5196                        to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
5197                                         srq->wqe_cnt));
5198
5199         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5200                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5201                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5202                 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
5203         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5204                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5205                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5206                 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
5207
5208         srq_context->idx_nxt_blk_addr =
5209                                 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
5210         roce_set_field(srq_context->rsv_idxnxtblkaddr,
5211                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5212                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5213                        upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5214         roce_set_field(srq_context->byte_56_xrc_cqn,
5215                        SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5216                        cqn);
5217         roce_set_field(srq_context->byte_56_xrc_cqn,
5218                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5219                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5220                        to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5221         roce_set_field(srq_context->byte_56_xrc_cqn,
5222                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5223                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5224                        to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5225
5226         roce_set_bit(srq_context->db_record_addr_record_en,
5227                      SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5228 }
5229
5230 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5231                                   struct ib_srq_attr *srq_attr,
5232                                   enum ib_srq_attr_mask srq_attr_mask,
5233                                   struct ib_udata *udata)
5234 {
5235         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5236         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5237         struct hns_roce_srq_context *srq_context;
5238         struct hns_roce_srq_context *srqc_mask;
5239         struct hns_roce_cmd_mailbox *mailbox;
5240         int ret;
5241
5242         /* Resizing SRQs is not supported yet */
5243         if (srq_attr_mask & IB_SRQ_MAX_WR)
5244                 return -EINVAL;
5245
5246         if (srq_attr_mask & IB_SRQ_LIMIT) {
5247                 if (srq_attr->srq_limit >= srq->wqe_cnt)
5248                         return -EINVAL;
5249
5250                 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5251                 if (IS_ERR(mailbox))
5252                         return PTR_ERR(mailbox);
5253
5254                 srq_context = mailbox->buf;
5255                 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5256
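                     /*
                      * The mailbox carries the new context followed by a
                      * mask context: fields left as all-ones in the mask
                      * are preserved by hardware, so only the bits
                      * cleared below are actually modified.
                      */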
5257                 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5258
5259                 roce_set_field(srq_context->byte_8_limit_wl,
5260                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5261                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5262                 roce_set_field(srqc_mask->byte_8_limit_wl,
5263                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5264                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5265
5266                 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5267                                         HNS_ROCE_CMD_MODIFY_SRQC,
5268                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5269                 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5270                 if (ret) {
5271                         ibdev_err(&hr_dev->ib_dev,
5272                                   "failed to handle cmd of modifying SRQ, ret = %d.\n",
5273                                   ret);
5274                         return ret;
5275                 }
5276         }
5277
5278         return 0;
5279 }
5280
5281 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5282 {
5283         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5284         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5285         struct hns_roce_srq_context *srq_context;
5286         struct hns_roce_cmd_mailbox *mailbox;
5287         int limit_wl;
5288         int ret;
5289
5290         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5291         if (IS_ERR(mailbox))
5292                 return PTR_ERR(mailbox);
5293
5294         srq_context = mailbox->buf;
5295         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5296                                 HNS_ROCE_CMD_QUERY_SRQC,
5297                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
5298         if (ret) {
5299                 ibdev_err(&hr_dev->ib_dev,
5300                           "failed to process cmd of querying SRQ, ret = %d.\n",
5301                           ret);
5302                 goto out;
5303         }
5304
5305         limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5306                                   SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5307                                   SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5308
5309         attr->srq_limit = limit_wl;
5310         attr->max_wr = srq->wqe_cnt - 1;
5311         attr->max_sge = srq->max_gs;
5312
5313 out:
5314         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5315         return ret;
5316 }
5317
5318 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5319 {
5320         struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5321         struct hns_roce_v2_cq_context *cq_context;
5322         struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5323         struct hns_roce_v2_cq_context *cqc_mask;
5324         struct hns_roce_cmd_mailbox *mailbox;
5325         int ret;
5326
5327         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5328         if (IS_ERR(mailbox))
5329                 return PTR_ERR(mailbox);
5330
5331         cq_context = mailbox->buf;
5332         cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5333
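             /*
              * Same mask convention as for SRQC above: hardware updates
              * only the fields whose mask bits are cleared below.
              */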
5334         memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5335
5336         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5337                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5338                        cq_count);
5339         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5340                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5341                        0);
5342         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5343                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5344                        cq_period);
5345         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5346                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5347                        0);
5348
5349         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5350                                 HNS_ROCE_CMD_MODIFY_CQC,
5351                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
5352         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5353         if (ret)
5354                 ibdev_err(&hr_dev->ib_dev,
5355                           "failed to process cmd when modifying CQ, ret = %d\n",
5356                           ret);
5357
5358         return ret;
5359 }
5360
5361 static void hns_roce_irq_work_handle(struct work_struct *work)
5362 {
5363         struct hns_roce_work *irq_work =
5364                                 container_of(work, struct hns_roce_work, work);
5365         struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5366         u32 qpn = irq_work->qpn;
5367         u32 cqn = irq_work->cqn;
5368
5369         switch (irq_work->event_type) {
5370         case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5371                 ibdev_info(ibdev, "Path migration succeeded.\n");
5372                 break;
5373         case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5374                 ibdev_warn(ibdev, "Path migration failed.\n");
5375                 break;
5376         case HNS_ROCE_EVENT_TYPE_COMM_EST:
5377                 break;
5378         case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5379                 ibdev_warn(ibdev, "Send queue drained.\n");
5380                 break;
5381         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5382                 ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5383                           qpn, irq_work->sub_type);
5384                 break;
5385         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5386                 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5387                           qpn);
5388                 break;
5389         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5390                 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5391                           qpn, irq_work->sub_type);
5392                 break;
5393         case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5394                 ibdev_warn(ibdev, "SRQ limit reached.\n");
5395                 break;
5396         case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5397                 ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5398                 break;
5399         case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5400                 ibdev_err(ibdev, "SRQ catastrophic error.\n");
5401                 break;
5402         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5403                 ibdev_err(ibdev, "CQ 0x%x access error.\n", cqn);
5404                 break;
5405         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5406                 ibdev_warn(ibdev, "CQ 0x%x overflow.\n", cqn);
5407                 break;
5408         case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5409                 ibdev_warn(ibdev, "DB overflow.\n");
5410                 break;
5411         case HNS_ROCE_EVENT_TYPE_FLR:
5412                 ibdev_warn(ibdev, "Function level reset.\n");
5413                 break;
5414         default:
5415                 break;
5416         }
5417
5418         kfree(irq_work);
5419 }
5420
5421 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5422                                       struct hns_roce_eq *eq,
5423                                       u32 qpn, u32 cqn)
5424 {
5425         struct hns_roce_work *irq_work;
5426
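             /*
              * Runs in hard-irq context, hence GFP_ATOMIC; if the
              * allocation fails, only the deferred logging done by the
              * work handler is lost.
              */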
5427         irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5428         if (!irq_work)
5429                 return;
5430
5431         INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5432         irq_work->hr_dev = hr_dev;
5433         irq_work->qpn = qpn;
5434         irq_work->cqn = cqn;
5435         irq_work->event_type = eq->event_type;
5436         irq_work->sub_type = eq->sub_type;
5437         queue_work(hr_dev->irq_workq, &(irq_work->work));
5438 }
5439
5440 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
5441 {
5442         struct hns_roce_dev *hr_dev = eq->hr_dev;
5443         __le32 doorbell[2] = {};
5444
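             /*
              * doorbell[0] carries the doorbell command (plus the EQN
              * tag for CEQs) and doorbell[1] the new consumer index; the
              * *_ARMED command rearms the EQ unless it is configured as
              * always armed.
              */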
5445         if (eq->type_flag == HNS_ROCE_AEQ) {
5446                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5447                                HNS_ROCE_V2_EQ_DB_CMD_S,
5448                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5449                                HNS_ROCE_EQ_DB_CMD_AEQ :
5450                                HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5451         } else {
5452                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
5453                                HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
5454
5455                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5456                                HNS_ROCE_V2_EQ_DB_CMD_S,
5457                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5458                                HNS_ROCE_EQ_DB_CMD_CEQ :
5459                                HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5460         }
5461
5462         roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
5463                        HNS_ROCE_V2_EQ_DB_PARA_S,
5464                        (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
5465
5466         hns_roce_write64(hr_dev, doorbell, eq->doorbell);
5467 }
5468
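     /*
      * EQEs are claimed by owner bit: cons_index counts up to twice the
      * queue depth before wrapping, so (cons_index & entries) flips each
      * time the ring wraps, and an entry is treated as valid only while
      * its owner bit differs from that pass-parity bit.
      */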
5469 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5470 {
5471         struct hns_roce_aeqe *aeqe;
5472
5473         aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5474                                    (eq->cons_index & (eq->entries - 1)) *
5475                                    eq->eqe_size);
5476
5477         return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5478                 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5479 }
5480
5481 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5482                                struct hns_roce_eq *eq)
5483 {
5484         struct device *dev = hr_dev->dev;
5485         struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5486         int aeqe_found = 0;
5487         int event_type;
5488         int sub_type;
5489         u32 srqn;
5490         u32 qpn;
5491         u32 cqn;
5492
5493         while (aeqe) {
5494                 /* Make sure we read AEQ entry after we have checked the
5495                  * ownership bit
5496                  */
5497                 dma_rmb();
5498
5499                 event_type = roce_get_field(aeqe->asyn,
5500                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5501                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5502                 sub_type = roce_get_field(aeqe->asyn,
5503                                           HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5504                                           HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5505                 qpn = roce_get_field(aeqe->event.qp_event.qp,
5506                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5507                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5508                 cqn = roce_get_field(aeqe->event.cq_event.cq,
5509                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5510                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5511                 srqn = roce_get_field(aeqe->event.srq_event.srq,
5512                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5513                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5514
5515                 switch (event_type) {
5516                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5517                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5518                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5519                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5520                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5521                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5522                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5523                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5524                         hns_roce_qp_event(hr_dev, qpn, event_type);
5525                         break;
5526                 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5527                 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5528                         hns_roce_srq_event(hr_dev, srqn, event_type);
5529                         break;
5530                 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5531                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5532                         hns_roce_cq_event(hr_dev, cqn, event_type);
5533                         break;
5534                 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5535                         break;
5536                 case HNS_ROCE_EVENT_TYPE_MB:
5537                         hns_roce_cmd_event(hr_dev,
5538                                         le16_to_cpu(aeqe->event.cmd.token),
5539                                         aeqe->event.cmd.status,
5540                                         le64_to_cpu(aeqe->event.cmd.out_param));
5541                         break;
5542                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5543                         break;
5544                 case HNS_ROCE_EVENT_TYPE_FLR:
5545                         break;
5546                 default:
5547                         dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5548                                 event_type, eq->eqn, eq->cons_index);
5549                         break;
5550                 }
5551
5552                 eq->event_type = event_type;
5553                 eq->sub_type = sub_type;
5554                 ++eq->cons_index;
5555                 aeqe_found = 1;
5556
5557                 if (eq->cons_index > (2 * eq->entries - 1))
5558                         eq->cons_index = 0;
5559
5560                 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5561
5562                 aeqe = next_aeqe_sw_v2(eq);
5563         }
5564
5565         set_eq_cons_index_v2(eq);
5566         return aeqe_found;
5567 }
5568
5569 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5570 {
5571         struct hns_roce_ceqe *ceqe;
5572
5573         ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5574                                    (eq->cons_index & (eq->entries - 1)) *
5575                                    eq->eqe_size);
5576
5577         return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5578                 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5579 }
5580
5581 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5582                                struct hns_roce_eq *eq)
5583 {
5584         struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5585         int ceqe_found = 0;
5586         u32 cqn;
5587
5588         while (ceqe) {
5589                 /* Make sure we read CEQ entry after we have checked the
5590                  * ownership bit
5591                  */
5592                 dma_rmb();
5593
5594                 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5595                                      HNS_ROCE_V2_CEQE_COMP_CQN_S);
5596
5597                 hns_roce_cq_completion(hr_dev, cqn);
5598
5599                 ++eq->cons_index;
5600                 ceqe_found = 1;
5601
5602                 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
5603                         eq->cons_index = 0;
5604
5605                 ceqe = next_ceqe_sw_v2(eq);
5606         }
5607
5608         set_eq_cons_index_v2(eq);
5609
5610         return ceqe_found;
5611 }
5612
5613 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5614 {
5615         struct hns_roce_eq *eq = eq_ptr;
5616         struct hns_roce_dev *hr_dev = eq->hr_dev;
5617         int int_work;
5618
5619         if (eq->type_flag == HNS_ROCE_CEQ)
5620                 /* Completion event interrupt */
5621                 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5622         else
5623                 /* Asynchronous event interrupt */
5624                 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5625
5626         return IRQ_RETVAL(int_work);
5627 }
5628
5629 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5630 {
5631         struct hns_roce_dev *hr_dev = dev_id;
5632         struct device *dev = hr_dev->dev;
5633         int int_work = 0;
5634         u32 int_st;
5635         u32 int_en;
5636
5637         /* Abnormal interrupt */
5638         int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5639         int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5640
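             /*
              * The status bits appear to be write-one-to-clear: each
              * branch below writes the handled bit back to
              * ROCEE_VF_ABN_INT_ST_REG to acknowledge it before
              * re-enabling the interrupt.
              */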
5641         if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5642                 struct pci_dev *pdev = hr_dev->pci_dev;
5643                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5644                 const struct hnae3_ae_ops *ops = ae_dev->ops;
5645
5646                 dev_err(dev, "AEQ overflow!\n");
5647
5648                 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5649                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5650
5651                 /* Set reset level for reset_event() */
5652                 if (ops->set_default_reset_request)
5653                         ops->set_default_reset_request(ae_dev,
5654                                                        HNAE3_FUNC_RESET);
5655                 if (ops->reset_event)
5656                         ops->reset_event(pdev, NULL);
5657
5658                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5659                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5660
5661                 int_work = 1;
5662         } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5663                 dev_err(dev, "BUS ERR!\n");
5664
5665                 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5666                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5667
5668                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5669                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5670
5671                 int_work = 1;
5672         } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5673                 dev_err(dev, "OTHER ERR!\n");
5674
5675                 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5676                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5677
5678                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5679                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5680
5681                 int_work = 1;
5682         } else {
5683                 dev_err(dev, "there is no abnormal irq found.\n");
5684         }
5684
5685         return IRQ_RETVAL(int_work);
5686 }
5687
5688 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5689                                         int eq_num, int enable_flag)
5690 {
5691         int i;
5692
5693         if (enable_flag == EQ_ENABLE) {
5694                 for (i = 0; i < eq_num; i++)
5695                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5696                                    i * EQ_REG_OFFSET,
5697                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5698
5699                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5700                            HNS_ROCE_V2_VF_ABN_INT_EN_M);
5701                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5702                            HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5703         } else {
5704                 for (i = 0; i < eq_num; i++)
5705                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5706                                    i * EQ_REG_OFFSET,
5707                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5708
5709                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5710                            HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5711                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5712                            HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5713         }
5714 }
5715
5716 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5717 {
5718         struct device *dev = hr_dev->dev;
5719         int ret;
5720
5721         if (eqn < hr_dev->caps.num_comp_vectors)
5722                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5723                                         0, HNS_ROCE_CMD_DESTROY_CEQC,
5724                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5725         else
5726                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5727                                         0, HNS_ROCE_CMD_DESTROY_AEQC,
5728                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5729         if (ret)
5730                 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5731 }
5732
5733 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5734 {
5735         hns_roce_mtr_destroy(hr_dev, &eq->mtr);
5736 }
5737
5738 static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5739                       void *mb_buf)
5740 {
5741         u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
5742         struct hns_roce_eq_context *eqc;
5743         u64 bt_ba = 0;
5744         int count;
5745
5746         eqc = mb_buf;
5747         memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5748
5749         /* init eqc */
5750         eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5751         eq->cons_index = 0;
5752         eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5753         eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5754         eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5755         eq->shift = ilog2((unsigned int)eq->entries);
5756
5757         /* if not multi-hop, the eqe buffer uses only one trunk */
5758         count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
5759                                   &bt_ba);
5760         if (count < 1) {
5761                 dev_err(hr_dev->dev, "failed to find EQE mtr\n");
5762                 return -ENOBUFS;
5763         }
5764
5765         /* set eqc state */
5766         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
5767                        HNS_ROCE_V2_EQ_STATE_VALID);
5768
5769         /* set eqe hop num */
5770         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
5771                        HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5772
5773         /* set eqc over_ignore */
5774         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
5775                        HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5776
5777         /* set eqc coalesce */
5778         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
5779                        HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5780
5781         /* set eqc arm_state */
5782         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
5783                        HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5784
5785         /* set eqn */
5786         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
5787                        eq->eqn);
5788
5789         /* set eqe_cnt */
5790         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
5791                        HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
5792
5793         /* set eqe_ba_pg_sz */
5794         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
5795                        HNS_ROCE_EQC_BA_PG_SZ_S,
5796                        to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
5797
5798         /* set eqe_buf_pg_sz */
5799         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
5800                        HNS_ROCE_EQC_BUF_PG_SZ_S,
5801                        to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
5802
5803         /* set eq_producer_idx */
5804         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
5805                        HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
5806
5807         /* set eq_max_cnt */
5808         roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
5809                        HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5810
5811         /* set eq_period */
5812         roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
5813                        HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5814
5815         /* set eqe_report_timer */
5816         roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
5817                        HNS_ROCE_EQC_REPORT_TIMER_S,
5818                        HNS_ROCE_EQ_INIT_REPORT_TIMER);
5819
5820         /* set bt_ba [34:3] */
5821         roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
5822                        HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
5823
5824         /* set bt_ba [63:35] */
5825         roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
5826                        HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
5827
5828         /* set eq shift */
5829         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
5830                        eq->shift);
5831
5832         /* set eq MSI_IDX */
5833         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
5834                        HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
5835
5836         /* set cur_eqe_ba [27:12] */
5837         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5838                        HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
5839
5840         /* set cur_eqe_ba [59:28] */
5841         roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5842                        HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
5843
5844         /* set cur_eqe_ba [63:60] */
5845         roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5846                        HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
5847
5848         /* set eq consumer idx */
5849         roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
5850                        HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
5851
5852         roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5853                        HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
5854
5855         roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5856                        HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
5857
5858         roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
5859                        HNS_ROCE_EQC_EQE_SIZE_S,
5860                        eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
5861
5862         return 0;
5863 }
5864
5865 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5866 {
5867         struct hns_roce_buf_attr buf_attr = {};
5868         int err;
5869
5870         if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
5871                 eq->hop_num = 0;
5872         else
5873                 eq->hop_num = hr_dev->caps.eqe_hop_num;
5874
5875         buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
5876         buf_attr.region[0].size = eq->entries * eq->eqe_size;
5877         buf_attr.region[0].hopnum = eq->hop_num;
5878         buf_attr.region_count = 1;
5879         buf_attr.fixed_page = true;
5880
5881         err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
5882                                   hr_dev->caps.eqe_ba_pg_sz +
5883                                   HNS_HW_PAGE_SHIFT, NULL, 0);
5884         if (err)
5885                 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
5886
5887         return err;
5888 }
5889
5890 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5891                                  struct hns_roce_eq *eq,
5892                                  unsigned int eq_cmd)
5893 {
5894         struct hns_roce_cmd_mailbox *mailbox;
5895         int ret;
5896
5897         /* Allocate mailbox memory */
5898         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5899         if (IS_ERR(mailbox))
5900                 return PTR_ERR(mailbox);
5901
5902         ret = alloc_eq_buf(hr_dev, eq);
5903         if (ret)
5904                 goto free_cmd_mbox;
5905
5906         ret = config_eqc(hr_dev, eq, mailbox->buf);
5907         if (ret)
5908                 goto err_cmd_mbox;
5909
5910         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5911                                 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5912         if (ret) {
5913                 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
5914                 goto err_cmd_mbox;
5915         }
5916
5917         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5918
5919         return 0;
5920
5921 err_cmd_mbox:
5922         free_eq_buf(hr_dev, eq);
5923
5924 free_cmd_mbox:
5925         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5926
5927         return ret;
5928 }
5929
5930 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5931                                   int comp_num, int aeq_num, int other_num)
5932 {
5933         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5934         int i, j;
5935         int ret;
5936
5937         for (i = 0; i < irq_num; i++) {
5938                 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5939                                                GFP_KERNEL);
5940                 if (!hr_dev->irq_names[i]) {
5941                         ret = -ENOMEM;
5942                         goto err_kzalloc_failed;
5943                 }
5944         }
5945
5946         /* irq contains: abnormal + AEQ + CEQ */
5947         for (j = 0; j < other_num; j++)
5948                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5949                          "hns-abn-%d", j);
5950
5951         for (j = other_num; j < (other_num + aeq_num); j++)
5952                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5953                          "hns-aeq-%d", j - other_num);
5954
5955         for (j = (other_num + aeq_num); j < irq_num; j++)
5956                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5957                          "hns-ceq-%d", j - other_num - aeq_num);
5958
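             /*
              * irq_names[] is laid out as abn, aeq, ceq while
              * eq_table->eq[] stores the CEQs first and the AEQs after
              * them; the index arithmetic below translates between the
              * two orderings.
              */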
5959         for (j = 0; j < irq_num; j++) {
5960                 if (j < other_num)
5961                         ret = request_irq(hr_dev->irq[j],
5962                                           hns_roce_v2_msix_interrupt_abn,
5963                                           0, hr_dev->irq_names[j], hr_dev);
5965                 else if (j < (other_num + comp_num))
5966                         ret = request_irq(eq_table->eq[j - other_num].irq,
5967                                           hns_roce_v2_msix_interrupt_eq,
5968                                           0, hr_dev->irq_names[j + aeq_num],
5969                                           &eq_table->eq[j - other_num]);
5970                 else
5971                         ret = request_irq(eq_table->eq[j - other_num].irq,
5972                                           hns_roce_v2_msix_interrupt_eq,
5973                                           0, hr_dev->irq_names[j - comp_num],
5974                                           &eq_table->eq[j - other_num]);
5975                 if (ret) {
5976                         dev_err(hr_dev->dev, "Request irq error!\n");
5977                         goto err_request_failed;
5978                 }
5979         }
5980
5981         return 0;
5982
5983 err_request_failed:
5984         for (j -= 1; j >= 0; j--)
5985                 if (j < other_num)
5986                         free_irq(hr_dev->irq[j], hr_dev);
5987                 else
5988                         free_irq(eq_table->eq[j - other_num].irq,
5989                                  &eq_table->eq[j - other_num]);
5990
5991 err_kzalloc_failed:
5992         for (i -= 1; i >= 0; i--)
5993                 kfree(hr_dev->irq_names[i]);
5994
5995         return ret;
5996 }
5997
5998 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5999 {
6000         int irq_num;
6001         int eq_num;
6002         int i;
6003
6004         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6005         irq_num = eq_num + hr_dev->caps.num_other_vectors;
6006
6007         for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6008                 free_irq(hr_dev->irq[i], hr_dev);
6009
6010         for (i = 0; i < eq_num; i++)
6011                 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6012
6013         for (i = 0; i < irq_num; i++)
6014                 kfree(hr_dev->irq_names[i]);
6015 }
6016
6017 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6018 {
6019         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6020         struct device *dev = hr_dev->dev;
6021         struct hns_roce_eq *eq;
6022         unsigned int eq_cmd;
6023         int irq_num;
6024         int eq_num;
6025         int other_num;
6026         int comp_num;
6027         int aeq_num;
6028         int i;
6029         int ret;
6030
6031         other_num = hr_dev->caps.num_other_vectors;
6032         comp_num = hr_dev->caps.num_comp_vectors;
6033         aeq_num = hr_dev->caps.num_aeq_vectors;
6034
6035         eq_num = comp_num + aeq_num;
6036         irq_num = eq_num + other_num;
6037
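             /*
              * MSI-X vectors are ordered abnormal first, then AEQ, then
              * CEQ, while eq_table->eq[] holds the CEQs first; hence the
              * irq index math in the loop below.
              */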
6038         eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6039         if (!eq_table->eq)
6040                 return -ENOMEM;
6041
6042         /* create eq */
6043         for (i = 0; i < eq_num; i++) {
6044                 eq = &eq_table->eq[i];
6045                 eq->hr_dev = hr_dev;
6046                 eq->eqn = i;
6047                 if (i < comp_num) {
6048                         /* CEQ */
6049                         eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6050                         eq->type_flag = HNS_ROCE_CEQ;
6051                         eq->entries = hr_dev->caps.ceqe_depth;
6052                         eq->eqe_size = hr_dev->caps.ceqe_size;
6053                         eq->irq = hr_dev->irq[i + other_num + aeq_num];
6054                         eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6055                         eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6056                 } else {
6057                         /* AEQ */
6058                         eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6059                         eq->type_flag = HNS_ROCE_AEQ;
6060                         eq->entries = hr_dev->caps.aeqe_depth;
6061                         eq->eqe_size = hr_dev->caps.aeqe_size;
6062                         eq->irq = hr_dev->irq[i - comp_num + other_num];
6063                         eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6064                         eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6065                 }
6066
6067                 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6068                 if (ret) {
6069                         dev_err(dev, "eq create failed.\n");
6070                         goto err_create_eq_fail;
6071                 }
6072         }
6073
6074         /* enable irq */
6075         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6076
6077         ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
6078                                      aeq_num, other_num);
6079         if (ret) {
6080                 dev_err(dev, "Request irq failed.\n");
6081                 goto err_request_irq_fail;
6082         }
6083
6084         hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6085         if (!hr_dev->irq_workq) {
6086                 dev_err(dev, "Create irq workqueue failed!\n");
6087                 ret = -ENOMEM;
6088                 goto err_create_wq_fail;
6089         }
6090
6091         return 0;
6092
6093 err_create_wq_fail:
6094         __hns_roce_free_irq(hr_dev);
6095
6096 err_request_irq_fail:
6097         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6098
6099 err_create_eq_fail:
6100         for (i -= 1; i >= 0; i--)
6101                 free_eq_buf(hr_dev, &eq_table->eq[i]);
6102         kfree(eq_table->eq);
6103
6104         return ret;
6105 }
6106
6107 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6108 {
6109         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6110         int eq_num;
6111         int i;
6112
6113         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6114
6115         /* Disable irq */
6116         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6117
6118         __hns_roce_free_irq(hr_dev);
6119
6120         for (i = 0; i < eq_num; i++) {
6121                 hns_roce_v2_destroy_eqc(hr_dev, i);
6122
6123                 free_eq_buf(hr_dev, &eq_table->eq[i]);
6124         }
6125
6126         kfree(eq_table->eq);
6127
6128         flush_workqueue(hr_dev->irq_workq);
6129         destroy_workqueue(hr_dev->irq_workq);
6130 }
6131
6132 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6133         .query_cqc_info = hns_roce_v2_query_cqc_info,
6134 };
6135
6136 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6137         .destroy_qp = hns_roce_v2_destroy_qp,
6138         .modify_cq = hns_roce_v2_modify_cq,
6139         .poll_cq = hns_roce_v2_poll_cq,
6140         .post_recv = hns_roce_v2_post_recv,
6141         .post_send = hns_roce_v2_post_send,
6142         .query_qp = hns_roce_v2_query_qp,
6143         .req_notify_cq = hns_roce_v2_req_notify_cq,
6144 };
6145
6146 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6147         .modify_srq = hns_roce_v2_modify_srq,
6148         .post_srq_recv = hns_roce_v2_post_srq_recv,
6149         .query_srq = hns_roce_v2_query_srq,
6150 };
6151
6152 static const struct hns_roce_hw hns_roce_hw_v2 = {
6153         .cmq_init = hns_roce_v2_cmq_init,
6154         .cmq_exit = hns_roce_v2_cmq_exit,
6155         .hw_profile = hns_roce_v2_profile,
6156         .hw_init = hns_roce_v2_init,
6157         .hw_exit = hns_roce_v2_exit,
6158         .post_mbox = hns_roce_v2_post_mbox,
6159         .chk_mbox = hns_roce_v2_chk_mbox,
6160         .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6161         .set_gid = hns_roce_v2_set_gid,
6162         .set_mac = hns_roce_v2_set_mac,
6163         .write_mtpt = hns_roce_v2_write_mtpt,
6164         .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6165         .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6166         .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6167         .write_cqc = hns_roce_v2_write_cqc,
6168         .set_hem = hns_roce_v2_set_hem,
6169         .clear_hem = hns_roce_v2_clear_hem,
6170         .modify_qp = hns_roce_v2_modify_qp,
6171         .query_qp = hns_roce_v2_query_qp,
6172         .destroy_qp = hns_roce_v2_destroy_qp,
6173         .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6174         .modify_cq = hns_roce_v2_modify_cq,
6175         .post_send = hns_roce_v2_post_send,
6176         .post_recv = hns_roce_v2_post_recv,
6177         .req_notify_cq = hns_roce_v2_req_notify_cq,
6178         .poll_cq = hns_roce_v2_poll_cq,
6179         .init_eq = hns_roce_v2_init_eq_table,
6180         .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6181         .write_srqc = hns_roce_v2_write_srqc,
6182         .modify_srq = hns_roce_v2_modify_srq,
6183         .query_srq = hns_roce_v2_query_srq,
6184         .post_srq_recv = hns_roce_v2_post_srq_recv,
6185         .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6186         .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6187 };
6188
6189 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6190         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6191         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6192         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6193         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6194         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6195         /* required last entry */
6196         {0, }
6197 };
6198
6199 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6200
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                   struct hnae3_handle *handle)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int i;

        hr_dev->pci_dev = handle->pdev;
        hr_dev->dev = &handle->pdev->dev;
        hr_dev->hw = &hns_roce_hw_v2;
        hr_dev->dfx = &hns_roce_dfx_hw_v2;
        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
        hr_dev->odb_offset = hr_dev->sdb_offset;

        /* Get info from NIC driver. */
        hr_dev->reg_base = handle->rinfo.roce_io_base;
        hr_dev->caps.num_ports = 1;
        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
        hr_dev->iboe.phy_port[0] = 0;

        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
                            hr_dev->iboe.netdevs[0]->dev_addr);

        for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
                                                i + handle->rinfo.base_vector);

        /* cmd issue mode: 0 is poll, 1 is event */
        hr_dev->cmd_mod = 1;
        hr_dev->loop_idc = 0;

        hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
        priv->handle = handle;
}

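/*
 * Allocate the IB device and the hw v2 private data, then hand control
 * to the common hns_roce_init(). On failure the two allocations are
 * unwound in reverse order of their creation.
 */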
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;
        int ret;

        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
        if (!hr_dev)
                return -ENOMEM;

        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
        if (!hr_dev->priv) {
                ret = -ENOMEM;
                goto error_failed_kzalloc;
        }

        hns_roce_hw_v2_get_cfg(hr_dev, handle);

        ret = hns_roce_init(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
                goto error_failed_get_cfg;
        }

        handle->priv = hr_dev;

        return 0;

error_failed_get_cfg:
        kfree(hr_dev->priv);

error_failed_kzalloc:
        ib_dealloc_device(&hr_dev->ib_dev);

        return ret;
}

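/*
 * Tear down one RoCE instance. The device is marked uninitialized and
 * outstanding commands are cleaned up via hns_roce_handle_device_err()
 * before the common exit path releases the remaining resources.
 */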
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                             bool reset)
{
        struct hns_roce_dev *hr_dev = handle->priv;

        if (!hr_dev)
                return;

        handle->priv = NULL;

        hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
        hns_roce_handle_device_err(hr_dev);

        hns_roce_exit(hr_dev);
        kfree(hr_dev->priv);
        ib_dealloc_device(&hr_dev->ib_dev);
}

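/*
 * hnae3 client entry point. Initialization is refused with -EBUSY while
 * the backing hardware is resetting, since the command queue and
 * doorbells are not usable until the reset completes.
 */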
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        const struct pci_device_id *id;
        struct device *dev = &handle->pdev->dev;
        int ret;

        handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

        if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                goto reset_chk_err;
        }

        id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
        if (!id)
                return 0;

        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
                if (ops->ae_dev_resetting(handle) ||
                    ops->get_hw_reset_stat(handle))
                        goto reset_chk_err;
                else
                        return ret;
        }

        handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

        return 0;

reset_chk_err:
        dev_err(dev,
                "Device is busy in resetting state, please retry later.\n");

        return -EBUSY;
}

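/*
 * hnae3 client exit point. Only instances that completed initialization
 * are torn down; the instance_state transitions keep this from racing
 * with a concurrent reset notification.
 */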
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                           bool reset)
{
        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
                return;

        handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

        __hns_roce_hw_v2_uninit_instance(handle, reset);

        handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

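/*
 * Reset handling. The NIC driver drives the RoCE client through a
 * hardware reset with a sequence of notifications, roughly:
 *
 *   hns_roce_hw_v2_reset_notify(handle, HNAE3_DOWN_CLIENT);
 *   hns_roce_hw_v2_reset_notify(handle, HNAE3_UNINIT_CLIENT);
 *   ... hardware reset performed by the NIC driver ...
 *   hns_roce_hw_v2_reset_notify(handle, HNAE3_INIT_CLIENT);
 *
 * (A sketch of the expected ordering; the exact sequence is driven by
 * the hns3 reset state machine.) The DOWN stage below disables
 * doorbells and marks the device as resetting so in-flight work can be
 * failed cleanly.
 */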
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;

        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
                set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
        clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

        hr_dev = handle->priv;
        if (!hr_dev)
                return 0;

        hr_dev->is_reset = true;
        hr_dev->active = false;
        hr_dev->dis_db = true;

        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

        return 0;
}

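/*
 * Reinitialize the RoCE instance once the hardware reset has finished.
 * If the DOWN notification was a no-op (the instance was never up), the
 * HNS_ROCE_RST_DIRECT_RETURN bit short-circuits the reinit as well.
 */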
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
        struct device *dev = &handle->pdev->dev;
        int ret;

        if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
                               &handle->rinfo.state)) {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

        dev_info(dev, "In reset process RoCE client reinit.\n");
        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                /* When the reset notify type is HNAE3_INIT_CLIENT, the
                 * RoCE engine is reinitialized in this callback. If the
                 * reinit fails, clear handle->priv to tell the NIC
                 * driver that the RoCE client is unavailable.
                 */
                handle->priv = NULL;
                dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
        } else {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                dev_info(dev, "Reset done, RoCE client reinit finished.\n");
        }

        return ret;
}

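/*
 * Uninit half of the reset sequence. The msleep() before the teardown
 * is presumably there to let in-flight hardware activity settle before
 * resources are released; HNS_ROCE_V2_HW_RST_UNINT_DELAY is the
 * driver's fixed wait for that.
 */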
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
        if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
                return 0;

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
        dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
        msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
        __hns_roce_hw_v2_uninit_instance(handle, false);

        return 0;
}

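/*
 * Dispatch a reset notification from the hnae3 framework to the stage
 * handler above. Notification types without a RoCE-specific action
 * (e.g. HNAE3_UP_CLIENT) fall through to the default case and succeed.
 */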
static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
                                       enum hnae3_reset_notify_type type)
{
        int ret = 0;

        switch (type) {
        case HNAE3_DOWN_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_down(handle);
                break;
        case HNAE3_INIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_init(handle);
                break;
        case HNAE3_UNINIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_uninit(handle);
                break;
        default:
                break;
        }

        return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
        .init_instance = hns_roce_hw_v2_init_instance,
        .uninit_instance = hns_roce_hw_v2_uninit_instance,
        .reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
        .name = "hns_roce_hw_v2",
        .type = HNAE3_CLIENT_ROCE,
        .ops = &hns_roce_hw_v2_ops,
};
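
/*
 * Module entry. Registration with the hnae3 framework is all that
 * happens at load time; init_instance above runs later, once the
 * framework binds this client to a matching ae_dev.
 */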
static int __init hns_roce_hw_v2_init(void)
{
        return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
        hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");