// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/vmalloc.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,

enum efa_hw_stats {
	EFA_DEFINE_STATS(EFA_STATS_ENUM)
};

static const char *const efa_stats_names[] = {
	EFA_DEFINE_STATS(EFA_STATS_STR)
};
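
/*
 * The X-macro above keeps the stats enum and the name table in sync from a
 * single list: EFA_DEFINE_STATS(EFA_STATS_ENUM) expands to
 *   EFA_TX_BYTES, EFA_TX_PKTS, ...,
 * while EFA_DEFINE_STATS(EFA_STATS_STR) expands to
 *   [EFA_TX_BYTES] = "tx_bytes", [EFA_TX_PKTS] = "tx_pkts", ...
 */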

#define EFA_CHUNK_PAYLOAD_SHIFT       12
#define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE    8

#define EFA_CHUNK_SHIFT               12
#define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
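
/*
 * Each 4KB chunk therefore carries EFA_PTRS_PER_CHUNK 8-byte page
 * addresses followed by one struct efa_com_ctrl_buff_info linking to the
 * next chunk, i.e. (4096 - sizeof(struct efa_com_ctrl_buff_info)) / 8
 * payload pointers per chunk.
 */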

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

static inline bool is_rdma_read_cap(struct efa_dev *dev)
{
	return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
}

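/*
 * ABI compatibility helper: true when a user buffer of size sz is large
 * enough to contain field fld of struct x, i.e. when user space was built
 * against headers that know about that field.
 */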
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
				 FIELD_SIZEOF(typeof(x), fld) <= (sz))

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

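/*
 * Buffers returned here are released in two steps: dma_unmap_single() the
 * mapping, then free_pages_exact() the pages (for user-mappable buffers
 * the pages are freed in efa_mmap_free() instead).
 */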
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		if (is_rdma_read_cap(dev))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
	return err;
}

void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

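/*
 * rdma_user_mmap_entry_remove() is a no-op for NULL entries, so this is
 * safe to call from error paths where only some of the QP's entries were
 * inserted.
 */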
static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
					    struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
		struct efa_ucontext, ibucontext);
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
	}

	efa_qp_user_mmap_entries_remove(ucontext, qp);
	kfree(qp);
	return 0;
}

static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

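	/*
	 * The mmap entry covers a full page; hand the doorbell's offset
	 * within that page back to user space separately.
	 */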
	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    (resp->llq_desc_offset & ~PAGE_MASK));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(ucontext, qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->ibqp.qp_type = init_attr->qp_type;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(ucontext, qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size) {
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);

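		/*
		 * Pages already handed to an mmap entry are freed by
		 * efa_mmap_free() when the entry is removed.
		 */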
		if (!qp->rq_mmap_entry)
			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
	}
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_qp_err);
	return ERR_PTR(err);
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
				qp_attr_mask)) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

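	/*
	 * Default cur_state to the driver's cached state and new_state to
	 * cur_state (i.e. no state change) when the caller does not supply
	 * them.
	 */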
	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
				      BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
		params.cur_qp_state = qp_attr->cur_qp_state;
		params.qp_state = qp_attr->qp_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		params.modify_mask |=
			BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
		params.sq_psn = qp_attr->sq_psn;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	efa_destroy_cq_idx(dev, cq->cq_idx);
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
	rdma_user_mmap_entry_remove(cq->mmap_entry);
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
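	/*
	 * Room for entries * num_sub_cqs entries of cq_entry_size bytes
	 * each, page aligned so the buffer can be mmapped to user space.
	 */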
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_remove_mmap;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_remove_mmap:
	rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
	if (!cq->mmap_entry)
		free_pages_exact(cq->cpu_addr, cq->size);

err_out:
	atomic64_inc(&dev->stats.sw_stats.create_cq_err);
	return err;
}

static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

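	/* Walk the umem in BIT(hp_shift)-sized aligned DMA blocks. */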
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * Create a chunk list of the physical pages' DMA addresses from the
 * supplied scatter-gather list.
 */
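/*
 * Chunk layout (EFA_CHUNK_SIZE bytes each):
 *   [ payload ptr 0 | ... | payload ptr EFA_PTRS_PER_CHUNK-1 | ctrl_buff ]
 * where ctrl_buff is a struct efa_com_ctrl_buff_info holding the DMA
 * address and length of the next chunk; the last chunk's length covers
 * only its used payload pointers.
 */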
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the DMA addresses of the sg list pages into the chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map the chunks for DMA and fill in each chunk's next-chunk pointer */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	/* chunk i itself was never mapped; unmap only the chunks after it */
	for (i++; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* Initialize pbl continuous mode: map the pbl buffer to a DMA address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * Initialize pbl indirect mode: build a chunk list out of the DMA
 * addresses of the physical pages of the pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

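	/*
	 * kvzalloc() falls back to vmalloc() for large allocations; a
	 * vmalloc'ed buffer is not physically contiguous, so it has to be
	 * handed to the device through the indirect chunk list.
	 */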
	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	int supp_access_flags;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);

	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(udata, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = __ffs(pg_sz);
	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
				       pg_sz);
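	/*
	 * e.g. with 4KB pages, a 6KB region starting 1KB into a page needs
	 * DIV_ROUND_UP(1K + 6K, 4K) = 2 pages.
	 */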
1413
1414         ibdev_dbg(&dev->ibdev,
1415                   "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1416                   start, length, params.page_shift, params.page_num);
1417
1418         inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1419         if (params.page_num <= inline_size) {
1420                 err = efa_create_inline_pbl(dev, mr, &params);
1421                 if (err)
1422                         goto err_unmap;
1423
1424                 err = efa_com_register_mr(&dev->edev, &params, &result);
1425                 if (err)
1426                         goto err_unmap;
1427         } else {
1428                 err = efa_create_pbl(dev, &pbl, mr, &params);
1429                 if (err)
1430                         goto err_unmap;
1431
1432                 err = efa_com_register_mr(&dev->edev, &params, &result);
1433                 pbl_destroy(dev, &pbl);
1434
1435                 if (err)
1436                         goto err_unmap;
1437         }
1438
1439         mr->ibmr.lkey = result.l_key;
1440         mr->ibmr.rkey = result.r_key;
1441         mr->ibmr.length = length;
1442         ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1443
1444         return &mr->ibmr;
1445
1446 err_unmap:
1447         ib_umem_release(mr->umem);
1448 err_free:
1449         kfree(mr);
1450 err_out:
1451         atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
1452         return ERR_PTR(err);
1453 }
1454
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct efa_dev *dev = to_edev(ibmr->device);
        struct efa_com_dereg_mr_params params;
        struct efa_mr *mr = to_emr(ibmr);
        int err;

        ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

        params.l_key = mr->ibmr.lkey;
        err = efa_com_dereg_mr(&dev->edev, &params);
        if (err)
                return err;

        ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                           struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err) {
                ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
                return err;
        }

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        return 0;
}

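/* Return a UAR (user access region) to the device. */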
static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
        struct efa_com_dealloc_uar_params params = {
                .uarn = uarn,
        };

        return efa_com_dealloc_uar(&dev->edev, &params);
}

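/*
 * Allocate a user context: reserve a UAR for the process and report the
 * supported user commands and device limits back through udata.
 */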
int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
        struct efa_ucontext *ucontext = to_eucontext(ibucontext);
        struct efa_dev *dev = to_edev(ibucontext->device);
        struct efa_ibv_alloc_ucontext_resp resp = {};
        struct efa_com_alloc_uar_result result;
        int err;

        /*
         * It's fine if the driver does not know all request fields;
         * we will ack the input fields in our response.
         */

        err = efa_com_alloc_uar(&dev->edev, &result);
        if (err)
                goto err_out;

        ucontext->uarn = result.uarn;

        resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
        resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
        resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
        resp.inline_buf_size = dev->dev_attr.inline_buf_size;
        resp.max_llq_size = dev->dev_attr.max_llq_size;

        if (udata && udata->outlen) {
                err = ib_copy_to_udata(udata, &resp,
                                       min(sizeof(resp), udata->outlen));
                if (err)
                        goto err_dealloc_uar;
        }

        return 0;

err_dealloc_uar:
        efa_dealloc_uar(dev, result.uarn);
err_out:
        atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
        return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
        struct efa_ucontext *ucontext = to_eucontext(ibucontext);
        struct efa_dev *dev = to_edev(ibucontext->device);

        efa_dealloc_uar(dev, ucontext->uarn);
}

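/*
 * Entry free callback, invoked once the mmap entry has been removed and
 * its last reference dropped; only DMA pages have backing memory to
 * release here, IO mappings need no cleanup.
 */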
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
        struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

        /* DMA mapping is already gone, now free the pages */
        if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
                free_pages_exact(phys_to_virt(entry->address),
                                 entry->rdma_entry.npages * PAGE_SIZE);
        kfree(entry);
}

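/*
 * Map an entry into userspace. MMIO ranges are inserted with the caching
 * attribute recorded at entry creation (non-cached or write-combined),
 * while host DMA pages are inserted one at a time with vm_insert_page().
 */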
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
                      struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *rdma_entry;
        struct efa_user_mmap_entry *entry;
        unsigned long va;
        int err = 0;
        u64 pfn;

        rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
        if (!rdma_entry) {
                ibdev_dbg(&dev->ibdev,
                          "pgoff[%#lx] does not have a valid entry\n",
                          vma->vm_pgoff);
                return -EINVAL;
        }
        entry = to_emmap(rdma_entry);

        ibdev_dbg(&dev->ibdev,
                  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
                  entry->address, rdma_entry->npages * PAGE_SIZE,
                  entry->mmap_flag);

        pfn = entry->address >> PAGE_SHIFT;
        switch (entry->mmap_flag) {
        case EFA_MMAP_IO_NC:
                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
                                        entry->rdma_entry.npages * PAGE_SIZE,
                                        pgprot_noncached(vma->vm_page_prot),
                                        rdma_entry);
                break;
        case EFA_MMAP_IO_WC:
                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
                                        entry->rdma_entry.npages * PAGE_SIZE,
                                        pgprot_writecombine(vma->vm_page_prot),
                                        rdma_entry);
                break;
        case EFA_MMAP_DMA_PAGE:
                for (va = vma->vm_start; va < vma->vm_end;
                     va += PAGE_SIZE, pfn++) {
                        err = vm_insert_page(vma, va, pfn_to_page(pfn));
                        if (err)
                                break;
                }
                break;
        default:
                err = -EINVAL;
        }

        if (err) {
                ibdev_dbg(
                        &dev->ibdev,
                        "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
                        entry->address, rdma_entry->npages * PAGE_SIZE,
                        entry->mmap_flag, err);
        }

        rdma_user_mmap_entry_put(rdma_entry);
        return err;
}

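/* Verbs mmap entry point; lookup and mapping happen in __efa_mmap(). */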
int efa_mmap(struct ib_ucontext *ibucontext,
             struct vm_area_struct *vma)
{
        struct efa_ucontext *ucontext = to_eucontext(ibucontext);
        struct efa_dev *dev = to_edev(ibucontext->device);
        size_t length = vma->vm_end - vma->vm_start;

        ibdev_dbg(&dev->ibdev,
                  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
                  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

        return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
        struct efa_com_destroy_ah_params params = {
                .ah = ah->ah,
                .pdn = to_epd(ah->ibah.pd)->pdn,
        };

        return efa_com_destroy_ah(&dev->edev, &params);
}

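/*
 * Create an address handle for the destination GID carried in the GRH.
 * The admin command may sleep, so creation is refused in atomic context.
 */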
int efa_create_ah(struct ib_ah *ibah,
                  struct rdma_ah_attr *ah_attr,
                  u32 flags,
                  struct ib_udata *udata)
{
        struct efa_dev *dev = to_edev(ibah->device);
        struct efa_com_create_ah_params params = {};
        struct efa_ibv_create_ah_resp resp = {};
        struct efa_com_create_ah_result result;
        struct efa_ah *ah = to_eah(ibah);
        int err;

        if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
                ibdev_dbg(&dev->ibdev,
                          "Create address handle is not supported in atomic context\n");
                err = -EOPNOTSUPP;
                goto err_out;
        }

        if (udata->inlen &&
            !ib_is_udata_cleared(udata, 0, udata->inlen)) {
                ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
                err = -EINVAL;
                goto err_out;
        }

        memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
               sizeof(params.dest_addr));
        params.pdn = to_epd(ibah->pd)->pdn;
        err = efa_com_create_ah(&dev->edev, &params, &result);
        if (err)
                goto err_out;

        memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
        ah->ah = result.ah;

        resp.efa_address_handle = result.ah;

        if (udata->outlen) {
                err = ib_copy_to_udata(udata, &resp,
                                       min(sizeof(resp), udata->outlen));
                if (err) {
                        ibdev_dbg(&dev->ibdev,
                                  "Failed to copy udata for create_ah response\n");
                        goto err_destroy_ah;
                }
        }
        ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

        return 0;

err_destroy_ah:
        efa_ah_destroy(dev, ah);
err_out:
        atomic64_inc(&dev->stats.sw_stats.create_ah_err);
        return err;
}

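/*
 * Tear down an address handle; like creation, this issues an admin
 * command and therefore requires a sleepable context.
 */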
void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
        struct efa_dev *dev = to_edev(ibah->pd->device);
        struct efa_ah *ah = to_eah(ibah);

        ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

        if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
                ibdev_dbg(&dev->ibdev,
                          "Destroy address handle is not supported in atomic context\n");
                return;
        }

        efa_ah_destroy(dev, ah);
}

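/* Allocate the rdma_hw_stats structure naming every exported counter. */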
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
        return rdma_alloc_hw_stats_struct(efa_stats_names,
                                          ARRAY_SIZE(efa_stats_names),
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

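/*
 * Fill the stats structure: basic traffic counters come from a device
 * admin query, the rest are software counters maintained by the driver.
 */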
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
                     u8 port_num, int index)
{
        struct efa_com_get_stats_params params = {};
        union efa_com_get_stats_result result;
        struct efa_dev *dev = to_edev(ibdev);
        struct efa_com_basic_stats *bs;
        struct efa_com_stats_admin *as;
        struct efa_stats *s;
        int err;

        params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
        params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;

        err = efa_com_get_stats(&dev->edev, &params, &result);
        if (err)
                return err;

        bs = &result.basic_stats;
        stats->value[EFA_TX_BYTES] = bs->tx_bytes;
        stats->value[EFA_TX_PKTS] = bs->tx_pkts;
        stats->value[EFA_RX_BYTES] = bs->rx_bytes;
        stats->value[EFA_RX_PKTS] = bs->rx_pkts;
        stats->value[EFA_RX_DROPS] = bs->rx_drops;

        as = &dev->edev.aq.stats;
        stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
        stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
        stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

        s = &dev->stats;
        stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
        stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
        stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
        stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
        stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
        stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);

        return ARRAY_SIZE(efa_stats_names);
}

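/*
 * EFA reports an unspecified link layer; it is neither InfiniBand nor
 * Ethernet from the verbs point of view.
 */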
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
                                         u8 port_num)
{
        return IB_LINK_LAYER_UNSPECIFIED;
}