/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* One minute, for the sake of bringup. Commands must always
	 * complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
enum {
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_EQ_VEC_PAGES	= 0,
	MLX5_EQ_VEC_CMD		= 1,
	MLX5_EQ_VEC_ASYNC	= 2,
	MLX5_EQ_VEC_PFAULT	= 3,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME	= 32
};
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
enum {
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MCAM		 = 0x907f,
};
enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST	= 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO	= 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};

enum mlx5_port_status {
	MLX5_PORT_UP		= 1,
	MLX5_PORT_DOWN		= 2,
};

enum mlx5_eq_type {
	MLX5_EQ_TYPE_COMP,
	MLX5_EQ_TYPE_ASYNC,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	MLX5_EQ_TYPE_PF,
#endif
};
struct mlx5_bfreg_info {
	int			num_low_latency_bfregs;

	/* protect bfreg allocation data structs */
	struct mutex		lock;
};

struct mlx5_cmd_first {
	__be32		data[4];
};
struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
	void	       *in_msg;
	void	       *out_msg;
	u8		status;
	u16		inlen;
	u16		outlen;
};
struct cmd_msg_cache {
	/* protect block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
struct mlx5_cmd {
	dma_addr_t	alloc_dma;

	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void	       *buf;
	dma_addr_t	map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list   *frags;
	int			npages;
	int			size;
	u8			page_shift;
};
struct mlx5_eq_tasklet {
	struct list_head	list;
	struct list_head	process_list;
	struct tasklet_struct	task;
	/* lock on completion tasklet list */
	spinlock_t		lock;
};

struct mlx5_eq_pagefault {
	struct work_struct	 work;
	/* Pagefaults lock */
	spinlock_t		 lock;
	struct workqueue_struct *wq;
	mempool_t		*pool;
};

struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;
	struct mlx5_buf		buf;
	int			size;
	unsigned int		irqn;
	u8			eqn;
	int			nent;
	u64			mask;
	struct list_head	list;
	int			index;
	struct mlx5_rsc_debug  *dbg;
	enum mlx5_eq_type	type;
	union {
		struct mlx5_eq_tasklet	 tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		struct mlx5_eq_pagefault pf_ctx;
#endif
	};
};
struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	dirty;
		u32	ref_tag;
	} psv;
};
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};

#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 0x4,
	MLX5_RES_XSRQ	= 0x5,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	u32		srqn;
	int		max;
	int		max_gs;
	int		max_avail_gather;
	int		wqe_shift;
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq		pfault_eq;
#endif
	int			num_comp_vectors;
	/* protect EQs list */
	spinlock_t		lock;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};
struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	bool				sick;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		work;
	struct delayed_work		recover_work;
};
struct mlx5_cq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_qp_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int	enabled;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};

struct mlx5_irq_info {
	cpumask_var_t		mask;
	char			name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
	struct rb_root counters;
	struct list_head addlist;
	/* protect addlist add/splice operations */
	spinlock_t addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_pagefault;

struct mlx5_rl_entry {
	u32			rate;
	u16			index;
	u16			refcount;
};
struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};
enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR     = 0x3,
	MLX5_MODULE_STATUS_NUM       = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct msix_entry      *msix_arr;
	struct mlx5_irq_info   *irq_info;

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	struct list_head	free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;

	struct mlx5_flow_steering *steering;
	struct mlx5_eswitch    *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag	       *lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_port_module_event_stats pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	void		      (*pfault)(struct mlx5_core_dev *dev,
					void *context,
					struct mlx5_pagefault *pfault);
	void		       *pfault_ctx;
	struct srcu_struct	pfault_srcu;
#endif
	struct mlx5_bfreg_data	bfregs;
	struct mlx5_uars_page  *uar;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN	= BIT(0),
	MLX5_INTERFACE_STATE_UP		= BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN	= BIT(2),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u32			token;
	u8			event_subtype;
	u8			type;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/* Number of resource holding WQE, depends on type. */
			u32	wq_num;
			/* WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/* Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};

	struct mlx5_eq	       *eq;
	struct work_struct	work;
};
struct mlx5_td {
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
	} caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
	struct mlx5e_resources	mlx5e_res;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap	       *rmap;
#endif
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
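
/*
 * Illustrative sketch of a completion callback matching mlx5_cmd_cbk_t;
 * the helper name and the completion cookie are hypothetical, not part
 * of the driver API. status is the command status and context is the
 * cookie that was handed to mlx5_cmd_exec_cb() (declared further down).
 */
static inline void mlx5_example_cmd_done(int status, void *context)
{
	struct completion *comp = context;	/* hypothetical cookie */

	if (status)
		pr_warn("command completed with status %d\n", status);
	complete(comp);
}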
enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
};
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
enum phy_port_state {
	MLX5_AAA_111
};
struct mlx5_hca_vport_context {
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
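
/*
 * Illustrative usage sketch for the helper above (hypothetical variable
 * names; mlx5_buf_alloc()/mlx5_buf_free() are declared further down).
 * For a physically contiguous mlx5_buf, offsets resolve directly against
 * the single direct mapping:
 *
 *	struct mlx5_buf buf;
 *	int err = mlx5_buf_alloc(dev, 4096, &buf);
 *
 *	if (!err)
 *		entry = mlx5_buf_offset(&buf, n * 64);
 *	...
 *	mlx5_buf_free(dev, &buf);
 */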
extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
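
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the three firmware revision accessors above combine into the usual
 * "maj.min.sub" version string.
 */
static inline void mlx5_example_print_fw_rev(struct mlx5_core_dev *dev)
{
	pr_info("firmware version %d.%d.%d\n",
		fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
}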
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
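
/*
 * Illustrative sketch of the canonical mlx5_cmd_exec() calling pattern;
 * the helper name is hypothetical and ENABLE_HCA serves only as an
 * example opcode. The command layouts and the MLX5_SET()/MLX5_ST_SZ_DW()
 * accessors come from mlx5_ifc.h, pulled in via linux/mlx5/device.h.
 */
static inline int mlx5_example_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	/* every command carries its opcode in the input layout */
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}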
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
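
/*
 * Illustrative sketch of a register read through mlx5_core_access_reg();
 * the helper name is hypothetical and PAOS (port administrative and
 * operational status) serves only as an example register. The paos_reg
 * layout is defined in mlx5_ifc.h.
 */
static inline int mlx5_example_query_paos(struct mlx5_core_dev *dev, u32 *out,
					  int outlen)
{
	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};

	MLX5_SET(paos_reg, in, local_port, 1);
	return mlx5_core_access_reg(dev, in, sizeof(in), out, outlen,
				    MLX5_REG_PAOS, 0, 0);
}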
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
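
/*
 * Illustrative usage sketch for the blue flame register API above
 * (hypothetical names, error handling trimmed). A bfreg is typically
 * allocated once per send queue and released with the queue:
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err = mlx5_alloc_bfreg(mdev, &bfreg, false, false);
 *	...
 *	mlx5_free_bfreg(mdev, &bfreg);
 */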
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
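
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * an mkey is a 24-bit index in the upper bits plus an 8-bit variant in
 * the low byte, so the two helpers above invert each other.
 */
static inline u32 mlx5_example_make_mkey(u32 idx, u8 variant)
{
	return mlx5_idx_to_mkey(idx & MLX5_24BIT_MASK) | variant;
}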
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MAX_UMR_CACHE_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void			(*pfault)(struct mlx5_core_dev *dev,
					  void *context,
					  struct mlx5_pagefault *pfault);
	void *			(*get_dev)(void *context);
	int			protocol;
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
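
/*
 * Illustrative sketch of an mlx5_interface client (hypothetical names).
 * add() is called once per mlx5_core_dev, and whatever it returns is
 * passed back as the context argument of the remaining callbacks:
 *
 *	static struct mlx5_interface example_intf = {
 *		.add      = example_add,
 *		.remove   = example_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&example_intf);
 *	...
 *	mlx5_unregister_interface(&example_intf);
 */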
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
#ifndef CONFIG_MLX5_CORE_IPOIB
static inline
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif /* CONFIG_MLX5_CORE_IPOIB */
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}
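
/*
 * Illustrative usage sketch for the rate limiting API above (hypothetical
 * names, error handling trimmed). Rates are reference counted per device:
 *
 *	u16 index;
 *
 *	if (mlx5_rl_is_supported(dev) && mlx5_rl_is_in_range(dev, rate))
 *		err = mlx5_rl_add_rate(dev, rate, &index);
 *	...
 *	mlx5_rl_remove_rate(dev, rate);
 */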
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */