1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
/* Debug trace category tags. Each string names one trace domain and is
 * presumably passed as the category argument to the driver's debug-print
 * helper so output can be filtered per subsystem (CM = connection manager,
 * PUDA = privileged UDA, ILQ/IEQ = inbound listen/exception queues, etc.)
 * -- NOTE(review): usage site not visible in this chunk; confirm against
 * the driver's debug macro definition. */
11 #define IRDMA_DEBUG_ERR "ERR"
12 #define IRDMA_DEBUG_INIT "INIT"
13 #define IRDMA_DEBUG_DEV "DEV"
14 #define IRDMA_DEBUG_CM "CM"
15 #define IRDMA_DEBUG_VERBS "VERBS"
16 #define IRDMA_DEBUG_PUDA "PUDA"
17 #define IRDMA_DEBUG_ILQ "ILQ"
18 #define IRDMA_DEBUG_IEQ "IEQ"
19 #define IRDMA_DEBUG_QP "QP"
20 #define IRDMA_DEBUG_CQ "CQ"
21 #define IRDMA_DEBUG_MR "MR"
22 #define IRDMA_DEBUG_PBLE "PBLE"
23 #define IRDMA_DEBUG_WQE "WQE"
24 #define IRDMA_DEBUG_AEQ "AEQ"
25 #define IRDMA_DEBUG_CQP "CQP"
26 #define IRDMA_DEBUG_HMC "HMC"
27 #define IRDMA_DEBUG_USER "USER"
28 #define IRDMA_DEBUG_VIRT "VIRT"
29 #define IRDMA_DEBUG_DCB "DCB"
30 #define IRDMA_DEBUG_CQE "CQE"
31 #define IRDMA_DEBUG_CLNT "CLNT"
32 #define IRDMA_DEBUG_WS "WS"
33 #define IRDMA_DEBUG_STATS "STATS"
35 enum irdma_page_size {
36 IRDMA_PAGE_SIZE_4K = 0,
41 enum irdma_hdrct_flags {
47 enum irdma_term_layers {
53 enum irdma_term_error_types {
54 RDMAP_REMOTE_PROT = 1,
62 enum irdma_term_rdma_errors {
63 RDMAP_INV_STAG = 0x00,
64 RDMAP_INV_BOUNDS = 0x01,
66 RDMAP_UNASSOC_STAG = 0x03,
68 RDMAP_INV_RDMAP_VER = 0x05,
69 RDMAP_UNEXPECTED_OP = 0x06,
70 RDMAP_CATASTROPHIC_LOCAL = 0x07,
71 RDMAP_CATASTROPHIC_GLOBAL = 0x08,
72 RDMAP_CANT_INV_STAG = 0x09,
73 RDMAP_UNSPECIFIED = 0xff,
76 enum irdma_term_ddp_errors {
77 DDP_CATASTROPHIC_LOCAL = 0x00,
78 DDP_TAGGED_INV_STAG = 0x00,
79 DDP_TAGGED_BOUNDS = 0x01,
80 DDP_TAGGED_UNASSOC_STAG = 0x02,
81 DDP_TAGGED_TO_WRAP = 0x03,
82 DDP_TAGGED_INV_DDP_VER = 0x04,
83 DDP_UNTAGGED_INV_QN = 0x01,
84 DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
85 DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
86 DDP_UNTAGGED_INV_MO = 0x04,
87 DDP_UNTAGGED_INV_TOO_LONG = 0x05,
88 DDP_UNTAGGED_INV_DDP_VER = 0x06,
91 enum irdma_term_mpa_errors {
98 enum irdma_qp_event_type {
99 IRDMA_QP_EVENT_CATASTROPHIC,
100 IRDMA_QP_EVENT_ACCESS_ERR,
101 IRDMA_QP_EVENT_REQ_ERR,
104 enum irdma_hw_stats_index {
106 IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
107 IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
108 IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
109 IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
110 IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
111 IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
112 IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
113 IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
114 IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
115 IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
117 IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
118 IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
119 IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
120 IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
121 IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
122 IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
123 IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
124 IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
125 IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
126 IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
127 IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
128 IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
129 IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
130 IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
131 IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
132 IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
133 IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
134 IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
135 IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
136 IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
137 IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
138 IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
139 IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
140 IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
141 IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
142 IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
143 IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
144 IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
145 IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
146 IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
147 IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
148 IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
149 IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
151 IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
153 IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
154 IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
155 IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
156 IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
159 enum irdma_feature_type {
160 IRDMA_FEATURE_FW_INFO = 0,
161 IRDMA_HW_VERSION_INFO = 1,
162 IRDMA_QSETS_MAX = 26,
163 IRDMA_MAX_FEATURES, /* Must be last entry */
166 enum irdma_sched_prio_type {
167 IRDMA_PRIO_WEIGHTED_RR = 1,
168 IRDMA_PRIO_STRICT = 2,
169 IRDMA_PRIO_WEIGHTED_STRICT = 3,
172 enum irdma_vm_vf_type {
178 enum irdma_cqp_hmc_profile {
179 IRDMA_HMC_PROFILE_DEFAULT = 1,
180 IRDMA_HMC_PROFILE_FAVOR_VF = 2,
181 IRDMA_HMC_PROFILE_EQUAL = 3,
184 enum irdma_quad_entry_type {
185 IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
186 IRDMA_QHASH_TYPE_TCP_SYN,
187 IRDMA_QHASH_TYPE_UDP_UNICAST,
188 IRDMA_QHASH_TYPE_UDP_MCAST,
189 IRDMA_QHASH_TYPE_ROCE_MCAST,
190 IRDMA_QHASH_TYPE_ROCEV2_HW,
193 enum irdma_quad_hash_manage_type {
194 IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
195 IRDMA_QHASH_MANAGE_TYPE_ADD,
196 IRDMA_QHASH_MANAGE_TYPE_MODIFY,
199 enum irdma_syn_rst_handling {
200 IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
201 IRDMA_SYN_RST_HANDLING_HW_TCP,
202 IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
203 IRDMA_SYN_RST_HANDLING_FW_TCP,
206 enum irdma_queue_type {
207 IRDMA_QUEUE_TYPE_SQ_RQ = 0,
208 IRDMA_QUEUE_TYPE_CQP,
212 struct irdma_vsi_pestat;
214 struct irdma_dcqcn_cc_params {
226 struct irdma_cqp_init_info {
230 struct irdma_sc_dev *dev;
231 struct irdma_cqp_quanta *sq;
232 struct irdma_dcqcn_cc_params dcqcn_params;
242 bool en_datacenter_tcp:1;
243 bool disable_packed:1;
244 bool rocev2_rto_policy:1;
245 enum irdma_protocol_used protocol_used;
248 struct irdma_terminate_hdr {
255 struct irdma_cqp_sq_wqe {
256 __le64 buf[IRDMA_CQP_WQE_SIZE];
259 struct irdma_sc_aeqe {
260 __le64 buf[IRDMA_AEQE_SIZE];
264 __le64 buf[IRDMA_CEQE_SIZE];
267 struct irdma_cqp_ctx {
268 __le64 buf[IRDMA_CQP_CTX_SIZE];
271 struct irdma_cq_shadow_area {
272 __le64 buf[IRDMA_SHADOW_AREA_SIZE];
275 struct irdma_dev_hw_stats_offsets {
276 u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
279 struct irdma_dev_hw_stats {
280 u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
283 struct irdma_gather_stats {
284 u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
287 struct irdma_hw_stat_map {
293 struct irdma_stats_gather_info {
294 bool use_hmc_fcn_index:1;
295 bool use_stats_inst:1;
298 struct irdma_dma_mem stats_buff_mem;
299 void *gather_stats_va;
300 void *last_gather_stats_va;
303 struct irdma_vsi_pestat {
305 struct irdma_dev_hw_stats hw_stats;
306 struct irdma_stats_gather_info gather_info;
307 struct timer_list stats_timer;
308 struct irdma_sc_vsi *vsi;
309 struct irdma_dev_hw_stats last_hw_stats;
310 spinlock_t lock; /* rdma stats lock */
315 u8 __iomem *priv_hw_addr;
316 struct device *device;
317 struct irdma_hmc_info hmc;
321 struct list_head rxlist;
338 struct irdma_sc_ah *ah;
339 struct irdma_puda_buf *ah_buf;
340 spinlock_t lock; /* fpdu processing lock */
341 struct irdma_puda_buf *lastrcv_buf;
345 struct irdma_sc_dev *dev;
350 struct irdma_cqp_quanta {
351 __le64 elem[IRDMA_CQP_WQE_SIZE];
354 struct irdma_sc_cqp {
359 struct irdma_sc_dev *dev;
360 int (*process_cqp_sds)(struct irdma_sc_dev *dev,
361 struct irdma_update_sds_info *info);
362 struct irdma_dma_mem sdbuf;
363 struct irdma_ring sq_ring;
364 struct irdma_cqp_quanta *sq_base;
365 struct irdma_dcqcn_cc_params dcqcn_params;
369 atomic64_t completed_ops;
381 bool en_datacenter_tcp:1;
382 bool disable_packed:1;
383 bool rocev2_rto_policy:1;
384 enum irdma_protocol_used protocol_used;
387 struct irdma_sc_aeq {
390 struct irdma_sc_dev *dev;
391 struct irdma_sc_aeqe *aeqe_base;
394 struct irdma_ring aeq_ring;
396 u32 first_pm_pbl_idx;
402 struct irdma_sc_ceq {
405 struct irdma_sc_dev *dev;
406 struct irdma_ceqe *ceqe_base;
410 struct irdma_ring ceq_ring;
413 u32 first_pm_pbl_idx;
415 struct irdma_sc_vsi *vsi;
416 struct irdma_sc_cq **reg_cq;
418 spinlock_t req_cq_lock; /* protect access to reg_cq array */
421 bool itr_no_expire:1;
425 struct irdma_cq_uk cq_uk;
428 struct irdma_sc_dev *dev;
429 struct irdma_sc_vsi *vsi;
433 u32 shadow_read_threshold;
437 u32 first_pm_pbl_idx;
440 bool check_overflow:1;
446 struct irdma_qp_uk qp_uk;
452 struct irdma_sc_dev *dev;
453 struct irdma_sc_vsi *vsi;
454 struct irdma_sc_pd *pd;
456 void *llp_stream_handle;
457 struct irdma_pfpdu pfpdu;
472 bool ieq_pass_thru:1;
480 bool sq_flush_code:1;
481 bool rq_flush_code:1;
482 enum irdma_flush_opcode flush_code;
483 enum irdma_qp_event_type event_type;
486 struct list_head list;
489 struct irdma_stats_inst_info {
490 bool use_hmc_fcn_index;
495 struct irdma_up_info {
500 bool use_cnp_up_override:1;
/* Work-scheduler (WS) tree limits: at most 0x3FF (1023) nodes, and 0xFFFF
 * apparently serves as the "no node" sentinel for a node index
 * -- NOTE(review): sentinel usage inferred from the name; confirm against
 * the ws tree code that consumes irdma_ws_node_info. */
503 #define IRDMA_MAX_WS_NODES 0x3FF
504 #define IRDMA_WS_NODE_INVALID 0xFFFF
506 struct irdma_ws_node_info {
518 struct irdma_hmc_fpm_misc {
526 u32 ooiscf_block_size;
/* Default relative-bandwidth weights for QoS scheduler nodes: 64 for leaf
 * nodes, 1 for parent nodes -- NOTE(review): leaf/parent semantics inferred
 * from names; the struct these feed (struct irdma_ws_node) is not fully
 * visible here, verify before relying on this. */
529 #define IRDMA_LEAF_DEFAULT_REL_BW 64
530 #define IRDMA_PARENT_DEFAULT_REL_BW 1
533 struct list_head qplist;
534 struct mutex qos_mutex; /* protect QoS attributes per QoS level */
536 u32 l2_sched_node_id;
544 #define IRDMA_INVALID_STATS_IDX 0xff
545 struct irdma_sc_vsi {
547 struct irdma_sc_dev *dev;
550 struct irdma_virt_mem ilq_mem;
551 struct irdma_puda_rsrc *ilq;
553 struct irdma_virt_mem ieq_mem;
554 struct irdma_puda_rsrc *ieq;
558 enum irdma_vm_vf_type vm_vf_type;
559 bool stats_inst_alloc:1;
560 bool tc_change_pending:1;
561 struct irdma_vsi_pestat *pestat;
562 atomic_t qp_suspend_reqs;
563 int (*register_qset)(struct irdma_sc_vsi *vsi,
564 struct irdma_ws_node *tc_node);
565 void (*unregister_qset)(struct irdma_sc_vsi *vsi,
566 struct irdma_ws_node *tc_node);
570 u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
571 struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
572 u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
576 struct irdma_sc_dev {
577 struct list_head cqp_cmd_head; /* head of the CQP command list */
578 spinlock_t cqp_lock; /* protect CQP list access */
579 bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];
580 struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
581 u64 fpm_query_buf_pa;
582 u64 fpm_commit_buf_pa;
583 __le64 *fpm_query_buf;
584 __le64 *fpm_commit_buf;
587 u32 __iomem *wqe_alloc_db;
588 u32 __iomem *cq_arm_db;
589 u32 __iomem *aeq_alloc_db;
591 u32 __iomem *cq_ack_db;
592 u32 __iomem *ceq_itr_mask_db;
593 u32 __iomem *aeq_itr_mask_db;
594 u32 __iomem *hw_regs[IRDMA_MAX_REGS];
595 u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
596 u64 hw_masks[IRDMA_MAX_MASKS];
597 u64 hw_shifts[IRDMA_MAX_SHIFTS];
598 const struct irdma_hw_stat_map *hw_stats_map;
599 u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
600 u64 feature_info[IRDMA_MAX_FEATURES];
601 u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
602 struct irdma_hw_attrs hw_attrs;
603 struct irdma_hmc_info *hmc_info;
604 struct irdma_sc_cqp *cqp;
605 struct irdma_sc_aeq *aeq;
606 struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
607 struct irdma_sc_cq *ccq;
608 const struct irdma_irq_ops *irq_ops;
609 struct irdma_hmc_fpm_misc hmc_fpm_misc;
610 struct irdma_ws_node *ws_tree_root;
611 struct mutex ws_mutex; /* ws tree mutex */
618 int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
619 void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
620 void (*ws_reset)(struct irdma_sc_vsi *vsi);
623 struct irdma_modify_cq_info {
625 struct irdma_cqe *cq_base;
627 u32 shadow_read_threshold;
629 u32 first_pm_pbl_idx;
635 struct irdma_create_qp_info {
637 bool tcp_ctx_valid:1;
639 bool arp_cache_idx_valid:1;
645 struct irdma_modify_qp_info {
653 bool tcp_ctx_valid:1;
654 bool udp_ctx_valid:1;
656 bool arp_cache_idx_valid:1;
657 bool reset_tcp_conn:1;
658 bool remove_hash_idx:1;
659 bool dont_send_term:1;
660 bool dont_send_fin:1;
661 bool cached_var_valid:1;
667 struct irdma_ccq_cqe_info {
668 struct irdma_sc_cqp *cqp;
677 struct irdma_dcb_app_info {
683 struct irdma_qos_tc_info {
691 struct irdma_l2params {
692 struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
693 struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
695 u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
697 u8 up2tc[IRDMA_MAX_USER_PRIORITY];
698 u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
707 struct irdma_vsi_init_info {
708 struct irdma_sc_dev *dev;
710 struct irdma_l2params *params;
713 enum irdma_vm_vf_type vm_vf_type;
715 int (*register_qset)(struct irdma_sc_vsi *vsi,
716 struct irdma_ws_node *tc_node);
717 void (*unregister_qset)(struct irdma_sc_vsi *vsi,
718 struct irdma_ws_node *tc_node);
721 struct irdma_vsi_stats_info {
722 struct irdma_vsi_pestat *pestat;
724 bool alloc_stats_inst;
727 struct irdma_device_init_info {
728 u64 fpm_query_buf_pa;
729 u64 fpm_commit_buf_pa;
730 __le64 *fpm_query_buf;
731 __le64 *fpm_commit_buf;
737 struct irdma_ceq_init_info {
739 struct irdma_sc_dev *dev;
746 bool itr_no_expire:1;
749 u32 first_pm_pbl_idx;
750 struct irdma_sc_vsi *vsi;
751 struct irdma_sc_cq **reg_cq;
755 struct irdma_aeq_init_info {
757 struct irdma_sc_dev *dev;
763 u32 first_pm_pbl_idx;
767 struct irdma_ccq_init_info {
770 struct irdma_sc_dev *dev;
771 struct irdma_cqe *cq_base;
776 u32 shadow_read_threshold;
779 bool avoid_mem_cflct:1;
784 u32 first_pm_pbl_idx;
785 struct irdma_sc_vsi *vsi;
788 struct irdma_udp_offload_info {
790 bool insert_vlan_tag:1;
812 struct irdma_roce_offload_info {
835 bool use_stats_inst:1;
839 u8 mac_addr[ETH_ALEN];
843 struct irdma_iwarp_offload_info {
855 bool rcv_no_mpa_crc:1;
856 bool err_rq_idx_valid:1;
865 bool use_stats_inst:1;
871 u8 mac_addr[ETH_ALEN];
875 struct irdma_tcp_offload_info {
878 bool insert_vlan_tag:1;
881 bool avoid_stretch_ack:1;
883 bool ignore_tcp_opt:1;
884 bool ignore_tcp_uns_opt:1;
898 u16 syn_rst_handling;
905 u32 time_stamp_recent;
924 struct irdma_qp_host_ctx_info {
927 struct irdma_tcp_offload_info *tcp_info;
928 struct irdma_udp_offload_info *udp_info;
931 struct irdma_iwarp_offload_info *iwarp_info;
932 struct irdma_roce_offload_info *roce_info;
936 u32 rem_endpoint_idx;
939 bool tcp_info_valid:1;
940 bool iwarp_info_valid:1;
941 bool stats_idx_valid:1;
945 struct irdma_aeqe_info {
958 bool aeqe_overflow:1;
963 struct irdma_allocate_stag_info {
965 u64 first_pm_pbl_idx;
971 bool remote_access:1;
972 bool use_hmc_fcn_index:1;
977 struct irdma_mw_alloc_info {
981 bool remote_access:1;
983 bool mw1_bind_dont_vldt_key:1;
986 struct irdma_reg_ns_stag_info {
992 u32 first_pm_pbl_index;
993 enum irdma_addressing_type addr_type;
994 irdma_stag_index stag_idx;
997 irdma_stag_key stag_key;
998 bool use_hmc_fcn_index:1;
1003 struct irdma_fast_reg_stag_info {
1011 u32 first_pm_pbl_index;
1012 enum irdma_addressing_type addr_type;
1013 irdma_stag_index stag_idx;
1016 irdma_stag_key stag_key;
1021 bool use_hmc_fcn_index:1;
1027 struct irdma_dealloc_stag_info {
1034 struct irdma_register_shared_stag {
1036 enum irdma_addressing_type addr_type;
1037 irdma_stag_index new_stag_idx;
1038 irdma_stag_index parent_stag_idx;
1042 irdma_stag_key new_stag_key;
1045 struct irdma_qp_init_info {
1046 struct irdma_qp_uk_init_info qp_uk_init_info;
1047 struct irdma_sc_pd *pd;
1048 struct irdma_sc_vsi *vsi;
1065 struct irdma_cq_init_info {
1066 struct irdma_sc_dev *dev;
1070 u32 shadow_read_threshold;
1072 u32 first_pm_pbl_idx;
1075 bool ceq_id_valid:1;
1079 struct irdma_cq_uk_init_info cq_uk_init_info;
1080 struct irdma_sc_vsi *vsi;
1083 struct irdma_upload_context_info {
1091 struct irdma_local_mac_entry_info {
1096 struct irdma_add_arp_cache_entry_info {
1097 u8 mac_addr[ETH_ALEN];
1103 struct irdma_apbvt_info {
1108 struct irdma_qhash_table_info {
1109 struct irdma_sc_vsi *vsi;
1110 enum irdma_quad_hash_manage_type manage;
1111 enum irdma_quad_entry_type entry_type;
1114 u8 mac_addr[ETH_ALEN];
1124 struct irdma_cqp_manage_push_page_info {
1131 struct irdma_qp_flush_info {
1140 bool userflushcode:1;
1144 struct irdma_gen_ae_info {
1149 struct irdma_cqp_timeout {
1154 struct irdma_irq_ops {
1155 void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
1156 void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
1158 void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
1159 void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
1162 void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
1163 int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
1164 bool check_overflow, bool post_sq);
1165 int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
1166 int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
1167 struct irdma_ccq_cqe_info *info);
1168 int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
1169 struct irdma_ccq_init_info *info);
1171 int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
1172 int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
1174 int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
1175 int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
1176 struct irdma_ceq_init_info *info);
1177 void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
1178 void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
1180 int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
1181 struct irdma_aeq_init_info *info);
1182 int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
1183 struct irdma_aeqe_info *info);
1184 void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
1186 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
1188 void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
1189 void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
1190 struct irdma_sc_dev *dev);
1191 int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
1192 int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
1193 int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
1194 struct irdma_cqp_init_info *info);
1195 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
1196 int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
1197 struct irdma_ccq_cqe_info *cmpl_info);
1198 int irdma_sc_fast_register(struct irdma_sc_qp *qp,
1199 struct irdma_fast_reg_stag_info *info, bool post_sq);
1200 int irdma_sc_qp_create(struct irdma_sc_qp *qp,
1201 struct irdma_create_qp_info *info, u64 scratch,
1203 int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
1204 bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
1205 int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
1206 struct irdma_qp_flush_info *info, u64 scratch,
1208 int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
1209 int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
1210 struct irdma_modify_qp_info *info, u64 scratch,
1212 void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1215 void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
1216 void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1217 struct irdma_qp_host_ctx_info *info);
1218 void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1219 struct irdma_qp_host_ctx_info *info);
1220 int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
1221 int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
1222 void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
1223 int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
1224 u8 hmc_fn_id, bool post_sq,
1225 bool poll_registers);
1227 void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
1231 struct irdma_sc_qp *qp;
1232 struct irdma_create_qp_info info;
1237 struct irdma_sc_qp *qp;
1238 struct irdma_modify_qp_info info;
1243 struct irdma_sc_qp *qp;
1245 bool remove_hash_idx;
1250 struct irdma_sc_cq *cq;
1252 bool check_overflow;
1256 struct irdma_sc_cq *cq;
1257 struct irdma_modify_cq_info info;
1262 struct irdma_sc_cq *cq;
1267 struct irdma_sc_dev *dev;
1268 struct irdma_allocate_stag_info info;
1273 struct irdma_sc_dev *dev;
1274 struct irdma_mw_alloc_info info;
1279 struct irdma_sc_dev *dev;
1280 struct irdma_reg_ns_stag_info info;
1282 } mr_reg_non_shared;
1285 struct irdma_sc_dev *dev;
1286 struct irdma_dealloc_stag_info info;
1291 struct irdma_sc_cqp *cqp;
1292 struct irdma_add_arp_cache_entry_info info;
1294 } add_arp_cache_entry;
1297 struct irdma_sc_cqp *cqp;
1300 } del_arp_cache_entry;
1303 struct irdma_sc_cqp *cqp;
1304 struct irdma_local_mac_entry_info info;
1306 } add_local_mac_entry;
1309 struct irdma_sc_cqp *cqp;
1312 u8 ignore_ref_count;
1313 } del_local_mac_entry;
1316 struct irdma_sc_cqp *cqp;
1318 } alloc_local_mac_entry;
1321 struct irdma_sc_cqp *cqp;
1322 struct irdma_cqp_manage_push_page_info info;
1327 struct irdma_sc_dev *dev;
1328 struct irdma_upload_context_info info;
1330 } qp_upload_context;
1333 struct irdma_sc_dev *dev;
1334 struct irdma_hmc_fcn_info info;
1339 struct irdma_sc_ceq *ceq;
1344 struct irdma_sc_ceq *ceq;
1349 struct irdma_sc_aeq *aeq;
1354 struct irdma_sc_aeq *aeq;
1359 struct irdma_sc_qp *qp;
1360 struct irdma_qp_flush_info info;
1365 struct irdma_sc_qp *qp;
1366 struct irdma_gen_ae_info info;
1371 struct irdma_sc_cqp *cqp;
1379 struct irdma_sc_cqp *cqp;
1387 struct irdma_sc_cqp *cqp;
1388 struct irdma_apbvt_info info;
1390 } manage_apbvt_entry;
1393 struct irdma_sc_cqp *cqp;
1394 struct irdma_qhash_table_info info;
1396 } manage_qhash_table_entry;
1399 struct irdma_sc_dev *dev;
1400 struct irdma_update_sds_info info;
1405 struct irdma_sc_cqp *cqp;
1406 struct irdma_sc_qp *qp;
1411 struct irdma_sc_cqp *cqp;
1412 struct irdma_ah_info info;
1417 struct irdma_sc_cqp *cqp;
1418 struct irdma_ah_info info;
1423 struct irdma_sc_cqp *cqp;
1424 struct irdma_mcast_grp_info info;
1429 struct irdma_sc_cqp *cqp;
1430 struct irdma_mcast_grp_info info;
1435 struct irdma_sc_cqp *cqp;
1436 struct irdma_mcast_grp_info info;
1441 struct irdma_sc_cqp *cqp;
1442 struct irdma_stats_inst_info info;
1447 struct irdma_sc_cqp *cqp;
1448 struct irdma_stats_gather_info info;
1453 struct irdma_sc_cqp *cqp;
1454 struct irdma_ws_node_info info;
1459 struct irdma_sc_cqp *cqp;
1460 struct irdma_up_info info;
1465 struct irdma_sc_cqp *cqp;
1466 struct irdma_dma_mem query_buff_mem;
1472 struct cqp_cmds_info {
1473 struct list_head cqp_cmd_entry;
1479 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
1483 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1484 * @cqp: struct for cqp hw
1485 * @scratch: private data for CQP WQE
1487 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1491 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1493 #endif /* IRDMA_TYPE_H */