/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,

	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,

	CMD_STATUS_SUCCESS	= 0,

	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,

	MLX5_EQ_VEC_PAGES	= 0,
	MLX5_EQ_VEC_ASYNC	= 2,
	MLX5_EQ_VEC_PFAULT	= 3,
	MLX5_EQ_VEC_COMP_BASE,

	MLX5_MAX_IRQ_NAME	= 32

	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
	MLX5_REG_QETCR		= 0x4005,
	MLX5_REG_QTCT		= 0x400a,
	MLX5_REG_DCBX_PARAM	= 0x4020,
	MLX5_REG_DCBX_APP	= 0x4021,
	MLX5_REG_PCAP		= 0x5001,
	MLX5_REG_PMTU		= 0x5003,
	MLX5_REG_PTYS		= 0x5004,
	MLX5_REG_PAOS		= 0x5006,
	MLX5_REG_PFCC		= 0x5007,
	MLX5_REG_PPCNT		= 0x5008,
	MLX5_REG_PMAOS		= 0x5012,
	MLX5_REG_PUDE		= 0x5009,
	MLX5_REG_PMPE		= 0x5010,
	MLX5_REG_PELC		= 0x500e,
	MLX5_REG_PVLC		= 0x500f,
	MLX5_REG_PCMR		= 0x5041,
	MLX5_REG_PMLP		= 0x5002,
	MLX5_REG_NODE_DESC	= 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		= 0x9014,
	MLX5_REG_MLCR		= 0x902b,
	MLX5_REG_MTPPS		= 0x9053,
	MLX5_REG_MTPPSE		= 0x9054,
enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
struct mlx5_field_desc {

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	enum dbg_rsc_type type;
	struct mlx5_field_desc fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct mlx5_bfreg_info {
	int num_low_latency_bfregs;

	/* protect bfreg allocation data structs */
struct mlx5_cmd_first {

struct mlx5_cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
struct cmd_msg_cache {
	/* protect block chain allocations */
	struct list_head head;
	unsigned int max_inbox_size;
	unsigned int num_ent;
};

	MLX5_NUM_COMMAND_CACHES = 5,
struct mlx5_cmd_stats {
	struct dentry *count;
	/* protect command average calculations */

	dma_addr_t alloc_dma;
	/* protect command queue allocations */
	spinlock_t alloc_lock;

	/* protect token allocations */
	spinlock_t token_lock;

	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;

	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {

struct mlx5_cmd_mailbox {
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {

	struct mlx5_buf_list direct;

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
struct mlx5_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	/* lock on completion tasklet list */

struct mlx5_eq_pagefault {
	struct work_struct work;
	/* Pagefaults lock */
	struct workqueue_struct *wq;

	struct mlx5_core_dev *dev;
	__be32 __iomem *doorbell;

	struct list_head list;

	struct mlx5_rsc_debug *dbg;
	enum mlx5_eq_type type;

	struct mlx5_eq_tasklet tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq_pagefault pf_ctx;
#endif
struct mlx5_core_psv {

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
	struct ib_sig_err err_item;
	bool sig_status_checked;

struct mlx5_core_mkey {

#define MLX5_24BIT_MASK ((1 << 24) - 1)
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	struct completion free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common common; /* must be first */
	int max_avail_gather;
	void (*event)(struct mlx5_core_srq *, enum mlx5_event);
	struct completion free;
};
struct mlx5_eq_table {
	void __iomem *update_ci;
	void __iomem *update_arm_ci;
	struct list_head comp_eqs_list;
	struct mlx5_eq pages_eq;
	struct mlx5_eq async_eq;
	struct mlx5_eq cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq pfault_eq;
#endif
	int num_comp_vectors;
struct mlx5_uars_page {
	struct list_head list;
	unsigned long *reg_bitmap; /* for non fast path bf regs */
	unsigned long *fp_bitmap;
	unsigned int reg_avail;
	unsigned int fp_avail;
	struct kref ref_count;
	struct mlx5_core_dev *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct list_head list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head reg_head;
	struct mlx5_bfreg_head wc_head;
};

struct mlx5_sq_bfreg {
	struct mlx5_uars_page *up;
struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;

	/* wq spinlock to synchronize draining */
	struct workqueue_struct *wq;

	struct work_struct work;
	struct delayed_work recover_work;
};

struct mlx5_cq_table {
	/* protect radix tree */
	struct radix_tree_root tree;
};
struct mlx5_qp_table {
	/* protect radix tree */
	struct radix_tree_root tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	struct radix_tree_root tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	struct radix_tree_root tree;
};
struct mlx5_vf_context {

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;

struct mlx5_irq_info {
	char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_fc_stats {
	struct rb_root counters;
	struct list_head addlist;
	/* protect addlist add/splice operations */
	spinlock_t addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
struct mlx5_pagefault;

struct mlx5_rl_entry {

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex rl_lock;
	struct mlx5_rl_entry *rl_entry;
};
enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR     = 0x3,
	MLX5_MODULE_STATUS_NUM       = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table eq_table;
	struct msix_entry *msix_arr;
	struct mlx5_irq_info *irq_info;

	struct workqueue_struct *pg_wq;
	struct rb_root page_root;

	struct list_head free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;

	/* start: cq stuff */
	struct mlx5_cq_table cq_table;

	/* start: mkey stuff */
	struct mlx5_mkey_table mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex alloc_mutex;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */
	struct dentry *dbg_root;

	/* protect mkey key part */
	spinlock_t mkey_lock;

	struct list_head dev_list;
	struct list_head ctx_list;

	struct mlx5_flow_steering *steering;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
	unsigned long pci_dev_data;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

	struct mlx5_port_module_event_stats pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	struct srcu_struct pfault_srcu;
#endif
	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN = BIT(0),
	MLX5_INTERFACE_STATE_UP = BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};
/* Contains the details of a pagefault. */
struct mlx5_pagefault {

	/* Initiator or send message responder pagefault details. */

	/* Received packet size, only valid for responders. */

	/* Number of resource holding WQE, depends on type. */

	/* WQE index. Refers to either the send queue or
	 * receive queue, according to event_subtype.
	 */

	/* RDMA responder pagefault details */

	/* Received packet size, minimal size page fault
	 * resolution required for forward progress.
	 */

	struct work_struct work;
	struct list_head tirs_list;

struct mlx5e_resources {
	struct mlx5_core_mkey mkey;
};
struct mlx5_core_dev {
	struct pci_dev *pdev;
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;

	char board_id[MLX5_BOARD_ID_LEN];

	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;

	struct mlx5e_resources mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif
};
	struct mlx5_db_pgdir *pgdir;
	struct mlx5_ib_user_db_page *user_page;

	MLX5_COMP_EQ_SIZE = 1024,

	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;

	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;

	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum phy_port_state {

struct mlx5_hca_vport_context {
	enum port_state_policy policy;
	enum phy_port_state phys_state;
	enum ib_port_state vport_state;
	u8 port_physical_state;

	u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */

	u16 qkey_violation_counter;
	u16 pkey_violation_counter;
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
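
/*
 * Illustrative sketch (not part of the original header): the fw_rev_*()
 * helpers above are commonly combined into a human-readable firmware
 * version string, e.g.:
 *
 *	char ver[64];
 *
 *	snprintf(ver, sizeof(ver), "%d.%d.%04d",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */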
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
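
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * blocking firmware command is built with the mlx5_ifc accessors and issued
 * through mlx5_cmd_exec(); the ENABLE_HCA opcode below is only an example.
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * mlx5_cmd_exec_cb() takes the same in/out buffers plus a callback and
 * context for asynchronous completion.
 */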
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
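
/*
 * Illustrative ordering sketch (not part of the original header): the health
 * helpers above are normally paired, roughly init and start-poll while the
 * device comes up, then drain/stop on teardown and cleanup at remove:
 *
 *	mlx5_health_init(dev);
 *	mlx5_start_health_poll(dev);
 *	...
 *	mlx5_drain_health_wq(dev);
 *	mlx5_stop_health_poll(dev);
 *	mlx5_health_cleanup(dev);
 */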
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
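
/*
 * Illustrative sketch (not part of the original header): reading a port
 * register through mlx5_core_access_reg(); the PMTU register is used here
 * purely as an example (write == 0 means a query).
 *
 *	u32 in[MLX5_ST_SZ_DW(pmtu_reg)]  = {0};
 *	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 *	int err;
 *
 *	MLX5_SET(pmtu_reg, in, local_port, 1);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_PMTU, 0, 0);
 */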
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
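
/*
 * Illustrative sketch (not part of the original header): rate-limit entries
 * are reference counted per rate; callers typically validate the rate first,
 * then add it and later remove it again, e.g.:
 *
 *	u16 rl_index;
 *
 *	if (!mlx5_rl_is_in_range(dev, rate))
 *		return -EINVAL;
 *	err = mlx5_rl_add_rate(dev, rate, &rl_index);
 *	...
 *	mlx5_rl_remove_rate(dev, rate);
 */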
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
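
/*
 * Illustrative sketch (not part of the original header): allocating a blue
 * flame register for send-queue doorbells and releasing it again; the
 * map_wc/fast_path flags are shown with their most common (false) values.
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err;
 *
 *	err = mlx5_alloc_bfreg(mdev, &bfreg, false, false);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_free_bfreg(mdev, &bfreg);
 */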
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
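
/*
 * Illustrative note (not part of the original header): a 32-bit mkey is
 * treated here as an index in bits 31:8 and a variant byte in bits 7:0,
 * so for any mkey value:
 *
 *	mkey == (mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) | mlx5_mkey_variant(mkey))
 */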
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,

	MAX_MR_CACHE_ENTRIES	= 21,

	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
struct mlx5_interface {
	void *(*add)(struct mlx5_core_dev *dev);
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	int (*attach)(struct mlx5_core_dev *dev, void *context);
	void (*detach)(struct mlx5_core_dev *dev, void *context);
	void (*event)(struct mlx5_core_dev *dev, void *context,
		      enum mlx5_dev_event event, unsigned long param);
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	void *(*get_dev)(void *context);
	int protocol;
	struct list_head list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
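
/*
 * Illustrative sketch (not part of the original header): a protocol driver
 * registers itself with mlx5_core by filling in a struct mlx5_interface and
 * calling mlx5_register_interface(); the callback names are hypothetical.
 *
 *	static struct mlx5_interface my_mlx5_interface = {
 *		.add      = my_add,
 *		.remove   = my_remove,
 *		.event    = my_event,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&my_mlx5_interface);
 *	...
 *	mlx5_unregister_interface(&my_mlx5_interface);
 */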
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
struct mlx5_profile {
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

	MLX5_PCI_DEV_IS_VF	= 1 << 0,
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
#endif /* MLX5_DRIVER_H */