/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
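
/*
 * Illustrative usage sketch (not part of the original header): the macros
 * above prepend the device name, function, line and PID to every message,
 * so call sites only supply the format string. The variables here
 * (entries, qpn, err) are hypothetical:
 *
 *	mlx5_ib_dbg(dev, "created CQ with %d entries\n", entries);
 *	mlx5_ib_warn(dev, "unknown QP 0x%x in CQE\n", qpn);
 *	mlx5_ib_err(dev, "MR registration failed, err %d\n", err);
 */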
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
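
/*
 * Illustrative sketch (not in the original header): field_avail() lets the
 * driver accept commands from older user ABIs. Before touching a field of a
 * user-provided struct, check that the caller's payload ("inlen", a
 * hypothetical variable here) actually reaches that field:
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *		uidx = ucmd->uidx;	// field was provided by userspace
 */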
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
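
/*
 * Sketch (not in the original header): the mmap offset passed up from
 * userspace encodes a command above MLX5_IB_MMAP_CMD_SHIFT and a
 * per-command argument below it. A decoder along these lines (the helper
 * names are hypothetical) recovers both halves:
 */
static inline int mlx5_ib_mmap_example_get_command(unsigned long offset)
{
	/* the command field sits above the shift, masked to one byte */
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static inline int mlx5_ib_mmap_example_get_arg(unsigned long offset)
{
	/* everything below the shift is the per-command argument */
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}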
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
};
struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
	/* protect vma_private_list add/del */
	struct mutex *vma_private_list_mutex;
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex		vma_private_list_mutex;

	unsigned long		upd_xlt_page;
	/* protect ODP/KSM */
	struct mutex		upd_xlt_page_mutex;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};
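
/*
 * Illustrative sketch (not in the original header): every to_m*() helper in
 * this file wraps container_of() so a verbs object handed in by the IB core
 * can be converted back to the driver structure that embeds it, e.g.
 *
 *	static int example_verb(struct ib_ucontext *ibucontext)
 *	{
 *		struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 *		return ctx->cqe_version;
 *	}
 */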
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * only single add/removal of flow steering rule could be done
	 * simultaneously.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64
#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
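
/*
 * Sketch (not in the original header): the MLX5_IB_UPD_XLT_* bits are OR-ed
 * into the "flags" argument of mlx5_ib_update_xlt(), declared later in this
 * file. First-time population of a translation table might look like:
 *
 *	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ENABLE |
 *				 MLX5_IB_UPD_XLT_ADDR);
 */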
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
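
/*
 * Illustrative sketch (not in the original header): the helper above hides
 * the reserved flag value, so QP1-emulating code sets it like any other
 * create flag:
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 */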
enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};
struct mlx5_ib_wq {
	struct wr_list	       *w_list;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};
#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};
struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};
struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};
struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};
struct mlx5_bf {
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_ib_wq	rq;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	struct mlx5_bf		bf;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	bool			tunnel_offload_en;
};
struct mlx5_ib_cq_buf {
	struct ib_umem		*umem;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};
struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	unsigned int			page_shift;
	unsigned int			xlt_size;
};
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};
enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};
struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	bool			allocated_from_cache;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t       q_leaf_free;
};
struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *fsize;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	struct completion	compl;
};
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};
struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};
struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};
struct mlx5_ib_counters {
	u32 num_cong_counters;
};
struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
};
struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	enum ib_port_state	last_port_state;
};
struct mlx5_ib_dbg_param {
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};
struct mlx5_ib_dbg_cc_params {
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};
enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
struct mlx5_ib_dbg_delay_drop {
	struct dentry		*dir_debugfs;
	struct dentry		*rqs_cnt_debugfs;
	struct dentry		*events_cnt_debugfs;
	struct dentry		*timeout_debugfs;
};
struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	struct mlx5_ib_dbg_delay_drop *dbg;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct      mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	struct mlx5_ib_dbg_cc_params	*dbg_cc_params;

	/* protect the user_td */
	struct mutex		lb_mutex;
	u32			user_td;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
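
/*
 * Illustrative sketch (not in the original header): a typical MAD query
 * allocates an ib_smp, stamps the common header with init_query_mad() and
 * then selects the attribute of interest:
 *
 *	struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 *
 *	if (!in_mad)
 *		return -ENOMEM;
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 */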
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
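
/*
 * Illustrative sketch (not in the original header): convert_access() maps
 * verbs access bits to the device's MLX5_PERM_* bits, with local read
 * always granted. For a locally writable, remotely readable region:
 *
 *	u8 perm = convert_access(IB_ACCESS_LOCAL_WRITE |
 *				 IB_ACCESS_REMOTE_READ);
 */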
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ
	 * create flags; return zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
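
/*
 * Illustrative sketch (not in the original header): CQ creation can reject
 * unsupported flags up front:
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */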
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}
static inline int get_num_uars(struct mlx5_ib_dev *dev,
			       struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}
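
/*
 * Illustrative sketch (not in the original header): when the device supports
 * 4K UARs and userspace advertised that it can use them (lib_uar_4k), each
 * system page carries MLX5_UARS_IN_PAGE UARs, so the total is a simple
 * multiplication; otherwise the mapping is one UAR per page:
 *
 *	int uars = get_num_uars(dev, &context->bfregi);
 */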
#endif /* MLX5_IB_H */