// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */

static struct irdma_rsrc_limits rsrc_limits_table[] = {

/* types of hmc objects */
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
	IRDMA_HMC_IW_APBVT_ENTRY,
	IRDMA_HMC_IW_OOISCFFL,
};

/**
 * irdma_iwarp_ce_handler - handle iwarp completions
 * @iwcq: iwarp cq receiving event
 */
static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
	struct irdma_cq *cq = iwcq->back_cq;

	if (!cq->user_mode)
		atomic_set(&cq->armed, 0);
	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

/**
 * irdma_puda_ce_handler - handle puda completion events
 * @rf: RDMA PCI function
 * @cq: puda completion q for event
 */
static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
				  struct irdma_sc_cq *cq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 compl_error;
	int status;

	do {
		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
		if (status == -ENOENT)
			break;
		if (status) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n",
				  compl_error);
			break;
		}
	} while (1);
}

/**
 * irdma_process_ceq - handle ceq for completions
 * @rf: RDMA PCI function
 * @ceq: ceq having cq for completion
 */
static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_sc_ceq *sc_ceq;
	struct irdma_sc_cq *cq;
	unsigned long flags;

	sc_ceq = &ceq->sc_ceq;
	do {
		spin_lock_irqsave(&ceq->ce_lock, flags);
		cq = irdma_sc_process_ceq(dev, sc_ceq);
		if (!cq) {
			spin_unlock_irqrestore(&ceq->ce_lock, flags);
			break;
		}

		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
			irdma_iwarp_ce_handler(cq);

		spin_unlock_irqrestore(&ceq->ce_lock, flags);

		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
			irdma_puda_ce_handler(rf, cq);
	} while (1);
}

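/* Note: iWARP CQ completions are handled above while ce_lock is still
 * held, so they stay serialized against CEQ processing; CQP and PUDA
 * completions are dispatched only after the lock is dropped.
 */
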
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
				   struct irdma_aeqe_info *info)
{
	qp->sq_flush_code = info->sq;
	qp->rq_flush_code = info->rq;
	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;

	switch (info->ae_id) {
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
	case IRDMA_AE_AMP_INVALID_STAG:
	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
	case IRDMA_AE_AMP_UNALLOCATED_STAG:
	case IRDMA_AE_AMP_BAD_PD:
	case IRDMA_AE_AMP_BAD_QP:
	case IRDMA_AE_AMP_BAD_STAG_KEY:
	case IRDMA_AE_AMP_BAD_STAG_INDEX:
	case IRDMA_AE_AMP_TO_WRAP:
	case IRDMA_AE_PRIV_OPERATION_DENIED:
		qp->flush_code = FLUSH_PROT_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_UDA_XMIT_BAD_PD:
	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case IRDMA_AE_UDA_L4LEN_INVALID:
	case IRDMA_AE_DDP_UBE_INVALID_MO:
	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->flush_code = FLUSH_LOC_LEN_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
		qp->flush_code = FLUSH_REM_ACCESS_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
	case IRDMA_AE_IB_REMOTE_OP_ERROR:
		qp->flush_code = FLUSH_REM_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_LCE_QP_CATASTROPHIC:
		qp->flush_code = FLUSH_FATAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		qp->flush_code = FLUSH_GENERAL_ERR;
		break;
	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		qp->flush_code = FLUSH_RETRY_EXC_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
	case IRDMA_AE_AMP_MWBIND_VALID_STAG:
		qp->flush_code = FLUSH_MW_BIND_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_IB_INVALID_REQUEST:
		qp->flush_code = FLUSH_REM_INV_REQ_ERR;
		qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
		break;
	default:
		qp->flush_code = FLUSH_GENERAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	}
}

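/* Example of how the mapping above plays out: an
 * IRDMA_AE_AMP_BAD_STAG_KEY async event leaves the QP with
 * flush_code = FLUSH_PROT_ERR and event_type = IRDMA_QP_EVENT_ACCESS_ERR,
 * so CQEs flushed for that QP carry a protection error and the ULP is
 * notified with an access-error QP event.
 */
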
/**
 * irdma_process_aeq - handle aeq events
 * @rf: RDMA PCI function
 */
static void irdma_process_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct irdma_aeqe_info aeinfo;
	struct irdma_aeqe_info *info = &aeinfo;
	int ret;
	struct irdma_qp *iwqp = NULL;
	struct irdma_cq *iwcq = NULL;
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp_host_ctx_info *ctx_info = NULL;
	struct irdma_device *iwdev = rf->iwdev;
	unsigned long flags;
	u32 aeqcnt = 0;

	do {
		memset(info, 0, sizeof(*info));
		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		ibdev_dbg(&iwdev->ibdev,
			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
			  info->iwarp_state, info->ae_src);

		if (info->qp) {
			spin_lock_irqsave(&rf->qptable_lock, flags);
			iwqp = rf->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&rf->qptable_lock,
						       flags);
				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
					wake_up(&iwdev->suspend_wq);
					continue;
				}
				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
					  info->qp_cq_id);
				continue;
			}

			irdma_qp_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&rf->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
				iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
		} else {
			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
			struct irdma_cm_node *cm_node;
		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
			cm_node = iwqp->cm_node;
			if (cm_node->accept_pend) {
				atomic_dec(&cm_node->listener->pend_accepts_cnt);
				cm_node->accept_pend = 0;
			}
			iwqp->rts_ae_rcvd = 1;
			wake_up_interruptible(&iwqp->waitq);
			break;
		case IRDMA_AE_LLP_FIN_RECEIVED:
		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
				if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
				    iwqp->ibqp_state == IB_QPS_RTS) {
					irdma_next_iw_state(iwqp,
							    IRDMA_QP_STATE_CLOSING,
							    0, 0, 0);
					irdma_cm_disconn(iwqp);
				}
				irdma_schedule_cm_timer(iwqp->cm_node,
							(struct irdma_puda_buf *)iwqp,
							IRDMA_TIMER_TYPE_CLOSE,
							1, 0);
			}
			break;
		case IRDMA_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				irdma_terminate_done(qp, 0);
			else
				irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_BAD_CLOSE:
		case IRDMA_AE_RESET_SENT:
			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
					    0);
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_QP_SUSPEND_COMPLETE:
			if (iwqp->iwdev->vsi.tc_change_pending) {
				if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
					wake_up(&iwqp->iwdev->suspend_wq);
			}
			if (iwqp->suspend_pending) {
				iwqp->suspend_pending = false;
				wake_up(&iwqp->iwdev->suspend_wq);
			}
			break;
		case IRDMA_AE_TERMINATE_SENT:
			irdma_terminate_send_fin(qp);
			break;
		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
			irdma_terminate_received(qp, info);
			break;
		case IRDMA_AE_CQ_OPERATION_ERROR:
			ibdev_err(&iwdev->ibdev,
				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
				  info->ae_id);

			spin_lock_irqsave(&rf->cqtable_lock, flags);
			iwcq = rf->cq_table[info->qp_cq_id];
			if (!iwcq) {
				spin_unlock_irqrestore(&rf->cqtable_lock,
						       flags);
				ibdev_dbg(to_ibdev(dev),
					  "cq_id %d is already freed\n", info->qp_cq_id);
				continue;
			}

			irdma_cq_add_ref(&iwcq->ibcq);
			spin_unlock_irqrestore(&rf->cqtable_lock, flags);
			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent,
							 iwcq->ibcq.cq_context);
			}
			irdma_cq_rem_ref(&iwcq->ibcq);
			break;
		case IRDMA_AE_RESET_NOT_SENT:
		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
		case IRDMA_AE_RESOURCE_EXHAUSTION:
			break;
		case IRDMA_AE_PRIV_OPERATION_DENIED:
		case IRDMA_AE_STAG_ZERO_INVALID:
		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
		case IRDMA_AE_DDP_UBE_INVALID_MO:
		case IRDMA_AE_DDP_UBE_INVALID_QN:
		case IRDMA_AE_DDP_NO_L_BIT:
		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case IRDMA_AE_INVALID_ARP_ENTRY:
		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
		case IRDMA_AE_STALE_ARP_ENTRY:
		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
		case IRDMA_AE_LLP_SYN_RECEIVED:
		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		case IRDMA_AE_LCE_QP_CATASTROPHIC:
		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
		default:
			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
				  info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
			if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
				ctx_info->roce_info->err_rq_idx_valid = info->rq;
				if (info->rq) {
					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
								ctx_info);
				}
				irdma_set_flush_fields(qp, info);
				irdma_cm_disconn(iwqp);
				break;
			}
			ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
			if (info->rq) {
				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = true;
				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
						   ctx_info);
			}
			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
				irdma_cm_disconn(iwqp);
			} else {
				irdma_terminate_connection(qp, info);
			}
			break;
		}
		if (info->qp)
			irdma_qp_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		irdma_sc_repost_aeq_entries(dev, aeqcnt);
}

/**
 * irdma_ena_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
{
	dev->irq_ops->irdma_en_irq(dev, msix_id);
}

/**
 * irdma_dpc - tasklet for aeq and ceq 0
 * @t: tasklet_struct ptr
 */
static void irdma_dpc(struct tasklet_struct *t)
{
	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);

	if (rf->msix_shared)
		irdma_process_ceq(rf, rf->ceqlist);
	irdma_process_aeq(rf);
	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
}

/**
 * irdma_ceq_dpc - dpc handler for CEQ
 * @t: tasklet_struct ptr
 */
static void irdma_ceq_dpc(struct tasklet_struct *t)
{
	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
	struct irdma_pci_f *rf = iwceq->rf;

	irdma_process_ceq(rf, iwceq);
	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
}

/**
 * irdma_save_msix_info - copy msix vector information to iwarp device
 * @rf: RDMA PCI function
 *
 * Allocate iwdev msix table and copy the msix info to the table
 * Return 0 if successful, otherwise return error
 */
static int irdma_save_msix_info(struct irdma_pci_f *rf)
{
	struct irdma_qvlist_info *iw_qvlist;
	struct irdma_qv_info *iw_qvinfo;
	struct msix_entry *pmsix;
	u32 ceq_idx;
	u32 i;
	size_t size;

	if (!rf->msix_count)
		return -EINVAL;

	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!rf->iw_msixtbl)
		return -ENOMEM;

	rf->iw_qvlist = (struct irdma_qvlist_info *)
			(&rf->iw_msixtbl[rf->msix_count]);
	iw_qvlist = rf->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = rf->msix_count;
	if (rf->msix_count <= num_online_cpus())
		rf->msix_shared = true;
	else if (rf->msix_count > num_online_cpus() + 1)
		rf->msix_count = num_online_cpus() + 1;

	pmsix = rf->msix_entries;
	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
		rf->iw_msixtbl[i].idx = pmsix->entry;
		rf->iw_msixtbl[i].irq = pmsix->vector;
		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (!i) {
			iw_qvinfo->aeq_idx = 0;
			if (rf->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
		pmsix++;
	}

	return 0;
}

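/* Resulting vector layout (follows directly from the loop above):
 *
 *   msix_shared == true:  vector 0 -> AEQ + CEQ 0, vectors 1..N -> CEQ 1..N
 *   msix_shared == false: vector 0 -> AEQ only,    vectors 1..N -> CEQ 0..N-1
 */
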
/**
 * irdma_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: RDMA PCI function
 */
static irqreturn_t irdma_irq_handler(int irq, void *data)
{
	struct irdma_pci_f *rf = data;

	tasklet_schedule(&rf->dpc_tasklet);

	return IRQ_HANDLED;
}

/**
 * irdma_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t irdma_ceq_handler(int irq, void *data)
{
	struct irdma_ceq *iwceq = data;

	if (iwceq->irq != irq)
		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
			  iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);

	return IRQ_HANDLED;
}

/**
 * irdma_destroy_irq - destroy device interrupts
 * @rf: RDMA PCI function
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void irdma_destroy_irq(struct irdma_pci_f *rf,
			      struct irdma_msix_vector *msix_vec, void *dev_id)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;

	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
	irq_update_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * irdma_destroy_cqp - destroy control qp
 * @rf: RDMA PCI function
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void irdma_destroy_cqp(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	int status;

	status = irdma_sc_cqp_destroy(dev->cqp);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);

	irdma_cleanup_pending_cqp_op(rf);
	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
			  cqp->sq.pa);
	cqp->sq.va = NULL;
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq *aeq = &rf->aeq;
	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;

	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
	vfree(aeq->mem.va);
}

/**
 * irdma_destroy_aeq - destroy aeq
 * @rf: RDMA PCI function
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void irdma_destroy_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	int status;

	if (!rf->msix_shared) {
		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
	}
	if (rf->reset)
		goto exit;

	aeq->sc_aeq.size = 0;
	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);

exit:
	if (aeq->virtual_map) {
		irdma_destroy_virt_aeq(rf);
	} else {
		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
				  aeq->mem.pa);
		aeq->mem.va = NULL;
	}
}

/**
 * irdma_destroy_ceq - destroy ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	if (rf->reset)
		goto exit;

	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
		goto exit;
	}

	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
			  status);
exit:
	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
			  iwceq->mem.pa);
	iwceq->mem.va = NULL;
}

/**
 * irdma_del_ceq_0 - destroy ceq 0
 * @rf: RDMA PCI function
 *
 * Disable the ceq 0 interrupt and destroy the ceq 0
 */
static void irdma_del_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = rf->ceqlist;
	struct irdma_msix_vector *msix_vec;

	if (rf->msix_shared) {
		msix_vec = &rf->iw_msixtbl[0];
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
						  msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, rf);
	} else {
		msix_vec = &rf->iw_msixtbl[1];
		irdma_destroy_irq(rf, msix_vec, iwceq);
	}

	irdma_destroy_ceq(rf, iwceq);
	rf->sc_dev.ceq_valid = false;
	rf->ceqs_count = 0;
}

/**
 * irdma_del_ceqs - destroy all ceq's except CEQ 0
 * @rf: RDMA PCI function
 *
 * Go through all of the device ceq's, except 0, and for each
 * ceq disable the ceq interrupt and destroy the ceq
 */
static void irdma_del_ceqs(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = &rf->ceqlist[1];
	struct irdma_msix_vector *msix_vec;
	u32 i;

	if (rf->msix_shared)
		msix_vec = &rf->iw_msixtbl[1];
	else
		msix_vec = &rf->iw_msixtbl[2];

	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, iwceq);
		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
				  IRDMA_OP_CEQ_DESTROY);
		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}
	rf->ceqs_count = 1;
}

/**
 * irdma_destroy_ccq - destroy control cq
 * @rf: RDMA PCI function
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void irdma_destroy_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq *ccq = &rf->ccq;
	int status = 0;

	if (rf->cqp_cmpl_wq)
		destroy_workqueue(rf->cqp_cmpl_wq);

	if (!rf->reset)
		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
			  ccq->mem_cq.pa);
	ccq->mem_cq.va = NULL;
}

/**
 * irdma_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @privileged: permission to close HMC objects
 * @reset: true if called before reset
 */
static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
					 enum irdma_hmc_rsrc_type obj_type,
					 struct irdma_hmc_info *hmc_info,
					 bool privileged, bool reset)
{
	struct irdma_hmc_del_obj_info info = {};

	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.privileged = privileged;
	if (irdma_sc_del_hmc_obj(dev, &info, reset))
		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
			  obj_type);
}

/**
 * irdma_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @privileged: permission to delete HMC objects
 * @reset: true if called before reset
 * @vers: hardware version
 */
static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
				  struct irdma_hmc_info *hmc_info, bool privileged,
				  bool reset, enum irdma_vers vers)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     hmc_info, privileged, reset);
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}
}

/**
 * irdma_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
				     struct irdma_hmc_create_obj_info *info)
{
	return irdma_sc_create_hmc_obj(dev, info);
}

/**
 * irdma_create_hmc_objs - create all hmc objects for the device
 * @rf: RDMA PCI function
 * @privileged: permission to create HMC objects
 * @vers: hardware version
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
				 enum irdma_vers vers)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_hmc_create_obj_info info = {};
	int i, status = 0;

	info.hmc_info = dev->hmc_info;
	info.privileged = privileged;
	info.entry_type = rf->sd_type;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
			continue;
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
			info.rsrc_type = iw_hmc_obj_types[i];
			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
			info.add_sd_cnt = 0;
			status = irdma_create_hmc_obj_type(dev, &info);
			if (status) {
				ibdev_dbg(to_ibdev(dev),
					  "ERR: create obj type %d status = %d\n",
					  iw_hmc_obj_types[i], status);
				break;
			}
		}
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}

	if (!status)
		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
							   true, true);

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     dev->hmc_info, privileged,
						     false);
	}

	return status;
}

/**
 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
 * @rf: RDMA PCI function
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
				 struct irdma_dma_mem *memptr, u32 size,
				 u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)rf->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (unsigned long)mask + 1ULL);
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = rf->obj_next.pa + extra;
	memptr->size = size;
	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
		return -ENOMEM;

	rf->obj_next.va = (u8 *)memptr->va + size;
	rf->obj_next.pa = memptr->pa + size;

	return 0;
}

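/* Worked example (illustrative values, not taken from hardware): with
 * mask = 0xFF (256-byte alignment) and rf->obj_next.va at offset 0x1010,
 * ALIGN() rounds va up to newva = 0x1100, so extra = 0xF0 and the caller
 * gets memptr->va/pa advanced by 0xF0, keeping both 256-byte aligned.
 */
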
/**
 * irdma_create_cqp - create control qp
 * @rf: RDMA PCI function
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_cqp(struct irdma_pci_f *rf)
{
	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
	struct irdma_dma_mem mem;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp_init_info cqp_init_info = {};
	struct irdma_cqp *cqp = &rf->cqp;
	u16 maj_err, min_err;
	int i, status;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return -ENOMEM;

	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		status = -ENOMEM;
		goto err_scratch;
	}

	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
			     IRDMA_CQP_ALIGNMENT);
	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
					&cqp->sq.pa, GFP_KERNEL);
	if (!cqp->sq.va) {
		status = -ENOMEM;
		goto err_sq;
	}

	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
				       IRDMA_HOST_CTX_ALIGNMENT_M);
	if (status)
		goto err_ctx;

	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = rf->rsrc_profile;
	cqp_init_info.scratch_array = cqp->scratch_array;
	cqp_init_info.protocol_used = rf->protocol_used;

	switch (rf->rdma_ver) {
	case IRDMA_GEN_1:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
		break;
	case IRDMA_GEN_2:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
		break;
	}
	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
		goto err_ctx;
	}

	spin_lock_init(&cqp->req_lock);
	spin_lock_init(&cqp->compl_lock);

	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		ibdev_dbg(to_ibdev(dev),
			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
			  status, maj_err, min_err);
		goto err_ctx;
	}

	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);

	/* init the waitqueue of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	init_waitqueue_head(&cqp->remove_wq);
	return 0;

err_ctx:
	dma_free_coherent(dev->hw->device, cqp->sq.size,
			  cqp->sq.va, cqp->sq.pa);
	cqp->sq.va = NULL;
err_sq:
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
err_scratch:
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;

	return status;
}

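/* Sketch of the request/completion pattern the pool built above supports
 * (illustrative only; see irdma_del_local_mac_entry() below for a real
 * caller):
 *
 *	struct irdma_cqp_request *req;
 *
 *	req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	req->info.cqp_cmd = IRDMA_OP_...;
 *	req->info.post_sq = 1;
 *	(fill req->info.in.u.<op>, with scratch = (uintptr_t)req)
 *	irdma_handle_cqp_op(rf, req);
 *	irdma_put_cqp_request(&rf->cqp, req);
 */
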
/**
 * irdma_create_ccq - create control cq
 * @rf: RDMA PCI function
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq_init_info info = {};
	struct irdma_ccq *ccq = &rf->ccq;
	int status;

	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;

	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
				 IRDMA_CQ0_ALIGNMENT);
	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
					    &ccq->mem_cq.pa, GFP_KERNEL);
	if (!ccq->mem_cq.va)
		return -ENOMEM;

	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
				       ccq->shadow_area.size,
				       IRDMA_SHADOWAREA_M);
	if (status)
		goto exit;

	ccq->sc_cq.back_cq = ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = ccq->shadow_area.va;
	info.shadow_area_pa = ccq->shadow_area.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	info.vsi = &rf->default_vsi;
	status = irdma_sc_ccq_init(dev->ccq, &info);
	if (!status)
		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
exit:
	if (status) {
		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
				  ccq->mem_cq.va, ccq->mem_cq.pa);
		ccq->mem_cq.va = NULL;
	}

	return status;
}

/**
 * irdma_alloc_set_mac - set up a mac address table entry
 * @iwdev: irdma device
 *
 * Allocate a mac ip entry and add it to the hw table. Return 0
 * if successful, otherwise return error
 */
static int irdma_alloc_set_mac(struct irdma_device *iwdev)
{
	int status;

	status = irdma_alloc_local_mac_entry(iwdev->rf,
					     &iwdev->mac_ip_table_idx);
	if (!status) {
		status = irdma_add_local_mac_entry(iwdev->rf,
						   (const u8 *)iwdev->netdev->dev_addr,
						   (u8)iwdev->mac_ip_table_idx);
		if (status)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * irdma_cfg_ceq_vector - set up the msix interrupt vector for ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
				u32 ceq_id, struct irdma_msix_vector *msix_vec)
{
	int status;

	if (rf->msix_shared && !ceq_id) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				     msix_vec->name, rf);
	} else {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-CEQ-%d",
			 dev_name(&rf->pcidev->dev), ceq_id);
		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);

		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
				     msix_vec->name, iwceq);
	}
	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
		return status;
	}

	msix_vec->ceq_id = ceq_id;
	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);

	return 0;
}

/**
 * irdma_cfg_aeq_vector - set up the msix vector for aeq
 * @rf: RDMA PCI function
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
	int ret = 0;

	if (!rf->msix_shared) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				  msix_vec->name, rf);
	}
	if (ret) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
		return -EINVAL;
	}

	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);

	return 0;
}

/**
 * irdma_create_ceq - create completion event queue
 * @rf: RDMA PCI function
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 * @vsi: SC vsi struct
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
			    u32 ceq_id, struct irdma_sc_vsi *vsi)
{
	struct irdma_ceq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 ceq_size;
	int status;

	info.ceq_id = ceq_id;
	iwceq->rf = rf;
	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
		       dev->hw_attrs.max_hw_ceq_size);
	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
				IRDMA_CEQ_ALIGNMENT);
	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
					   &iwceq->mem.pa, GFP_KERNEL);
	if (!iwceq->mem.va)
		return -ENOMEM;

	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;
	info.elem_cnt = ceq_size;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	info.vsi = vsi;
	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
	if (!status) {
		if (dev->ceq_valid)
			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
						   IRDMA_OP_CEQ_CREATE);
		else
			status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
	}

	if (status) {
		dma_free_coherent(dev->hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}

	return status;
}

/**
 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
 * @rf: RDMA PCI function
 *
 * Allocate a list for all device completion event queues
 * Create the ceq 0 and configure its msix interrupt vector
 * Return 0, if successfully set up, otherwise return error
 */
static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	u32 i;
	u32 num_ceqs;
	int status = 0;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
	if (!rf->ceqlist) {
		status = -ENOMEM;
		goto exit;
	}

	iwceq = &rf->ceqlist[0];
	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
			  status);
		goto exit;
	}

	spin_lock_init(&iwceq->ce_lock);
	i = rf->msix_shared ? 0 : 1;
	msix_vec = &rf->iw_msixtbl[i];
	iwceq->irq = msix_vec->irq;
	iwceq->msix_idx = msix_vec->idx;
	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
	if (status) {
		irdma_destroy_ceq(rf, iwceq);
		goto exit;
	}

	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
	rf->ceqs_count++;

exit:
	if (status && !rf->ceqs_count) {
		kfree(rf->ceqlist);
		rf->ceqlist = NULL;
		return status;
	}
	rf->sc_dev.ceq_valid = true;

	return 0;
}

/**
 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources
 * @rf: RDMA PCI function
 * @vsi: VSI structure for this CEQ
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if ceqs are successfully set up, otherwise return error
 */
static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
	u32 i;
	u32 ceq_id;
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	int status;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	i = (rf->msix_shared) ? 1 : 2;
	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
		iwceq = &rf->ceqlist[ceq_id];
		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
		if (status) {
			ibdev_dbg(&rf->iwdev->ibdev,
				  "ERR: create ceq status = %d\n", status);
			goto del_ceqs;
		}
		spin_lock_init(&iwceq->ce_lock);
		msix_vec = &rf->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
		if (status) {
			irdma_destroy_ceq(rf, iwceq);
			goto del_ceqs;
		}
		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
		rf->ceqs_count++;
	}

	return 0;

del_ceqs:
	irdma_del_ceqs(rf);

	return status;
}

static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
{
	struct irdma_aeq *aeq = &rf->aeq;
	dma_addr_t *pg_arr;
	u32 pg_cnt;
	int status;

	if (rf->rdma_ver < IRDMA_GEN_2)
		return -EOPNOTSUPP;

	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
	aeq->mem.va = vzalloc(aeq->mem.size);
	if (!aeq->mem.va)
		return -ENOMEM;

	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
	if (status) {
		vfree(aeq->mem.va);
		return status;
	}

	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
	if (status) {
		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
		vfree(aeq->mem.va);
		return status;
	}

	return 0;
}

/**
 * irdma_create_aeq - create async event queue
 * @rf: RDMA PCI function
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
	u32 aeq_size;
	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
	int status;

	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
		   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
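	/* Illustrative sizing (assumed numbers, not vendor defaults): an
	 * iWARP-only function (multiplier = 2) with 4096 QPs and 8192 CQs
	 * asks for 2 * 4096 + 8192 = 16384 AEQEs before the min() clamp
	 * above.
	 */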

	aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
			      IRDMA_AEQ_ALIGNMENT);
	aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
					 &aeq->mem.pa,
					 GFP_KERNEL | __GFP_NOWARN);
	if (aeq->mem.va)
		goto skip_virt_aeq;

	/* physically mapped aeq failed. setup virtual aeq */
	status = irdma_create_virt_aeq(rf, aeq_size);
	if (status)
		return status;

	info.virtual_map = true;
	aeq->virtual_map = info.virtual_map;
	info.pbl_chunk_size = 1;
	info.first_pm_pbl_idx = aeq->palloc.level1.idx;

skip_virt_aeq:
	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	info.msix_idx = rf->iw_msixtbl->idx;
	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto err;

	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
	if (status)
		goto err;

	return 0;

err:
	if (aeq->virtual_map) {
		irdma_destroy_virt_aeq(rf);
	} else {
		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
				  aeq->mem.pa);
		aeq->mem.va = NULL;
	}

	return status;
}

/**
 * irdma_setup_aeq - set up the device aeq
 * @rf: RDMA PCI function
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static int irdma_setup_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	status = irdma_create_aeq(rf);
	if (status)
		return status;

	status = irdma_cfg_aeq_vector(rf);
	if (status) {
		irdma_destroy_aeq(rf);
		return status;
	}

	if (!rf->msix_shared)
		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);

	return 0;
}

/**
 * irdma_initialize_ilq - create iwarp local queue for cm
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int irdma_initialize_ilq(struct irdma_device *iwdev)
{
	struct irdma_puda_rsrc_info info = {};
	int status;

	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 1;
	info.count = 1;
	info.pd_id = 1;
	info.abi_ver = IRDMA_ABI_VER;
	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
	info.rq_size = info.sq_size;
	info.buf_size = 1024;
	info.tx_buf_cnt = 2 * info.sq_size;
	info.receive = irdma_receive_ilq;
	info.xmit_complete = irdma_free_sqbuf;
	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");

	return status;
}

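/* Sizing note (illustrative numbers): with max_qp = 4096 the ILQ gets
 * sq_size = min(4096 / 2, 32768) = 2048 WQEs and 2 * 2048 = 4096
 * 1 KB transmit buffers.
 */
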
/**
 * irdma_initialize_ieq - create iwarp exception queue
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int irdma_initialize_ieq(struct irdma_device *iwdev)
{
	struct irdma_puda_rsrc_info info = {};
	int status;

	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->vsi.exception_lan_q;
	info.count = 1;
	info.pd_id = 2;
	info.abi_ver = IRDMA_ABI_VER;
	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
	info.rq_size = info.sq_size;
	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
	info.tx_buf_cnt = 4096;
	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");

	return status;
}

/**
 * irdma_reinitialize_ieq - destroy and re-create ieq
 * @vsi: VSI structure
 */
void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct irdma_pci_f *rf = iwdev->rf;

	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
	if (irdma_initialize_ieq(iwdev)) {
		iwdev->rf->reset = true;
		rf->gen_ops.request_reset(rf);
	}
}

/**
 * irdma_hmc_setup - create hmc objects for the device
 * @rf: RDMA PCI function
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static int irdma_hmc_setup(struct irdma_pci_f *rf)
{
	int status;
	u32 qpcnt;

	qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;

	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
	status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
	if (status)
		return status;

	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);

	return status;
}

/**
 * irdma_del_init_mem - deallocate memory resources
 * @rf: RDMA PCI function
 */
static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;

	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	vfree(rf->mem_rsrc);
	rf->mem_rsrc = NULL;
	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
			  rf->obj_mem.pa);
	rf->obj_mem.va = NULL;
	if (rf->rdma_ver != IRDMA_GEN_1) {
		bitmap_free(rf->allocated_ws_nodes);
		rf->allocated_ws_nodes = NULL;
	}
	kfree(rf->iw_msixtbl);
	rf->iw_msixtbl = NULL;
	kfree(rf->hmc_info_mem);
	rf->hmc_info_mem = NULL;
}

/**
 * irdma_initialize_dev - initialize device
 * @rf: RDMA PCI function
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static int irdma_initialize_dev(struct irdma_pci_f *rf)
{
	int status;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_device_init_info info = {};
	struct irdma_dma_mem mem;
	u32 size;

	size = sizeof(struct irdma_hmc_pble_rsrc) +
	       sizeof(struct irdma_hmc_info) +
	       (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);

	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!rf->hmc_info_mem)
		return -ENOMEM;

	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
	dev->hmc_info = &rf->hw.hmc;
	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
				 (rf->pble_rsrc + 1);

	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
	if (status)
		goto error;

	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;

	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
	if (status)
		goto error;

	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;

	info.bar0 = rf->hw.hw_addr;
	info.hmc_fn_id = rf->pf_id;
	status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
	if (status)
		goto error;

	return status;
error:
	kfree(rf->hmc_info_mem);
	rf->hmc_info_mem = NULL;

	return status;
}

/**
 * irdma_rt_deinit_hw - clean up the irdma device resources
 * @iwdev: irdma device
 *
 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
 * device queues and free the pble and the hmc objects
 */
void irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);

	switch (iwdev->init_state) {
	case IP_ADDR_REGISTERED:
		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
		fallthrough;
	case AEQ_CREATED:
	case PBLE_CHUNK_MEM:
	case CEQS_CREATED:
	case IEQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
					     iwdev->rf->reset);
		fallthrough;
	case ILQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi,
					     IRDMA_PUDA_RSRC_TYPE_ILQ,
					     iwdev->rf->reset);
		break;
	default:
		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
		break;
	}

	irdma_cleanup_cm_core(&iwdev->cm_core);
	if (iwdev->vsi.pestat) {
		irdma_vsi_stats_free(&iwdev->vsi);
		kfree(iwdev->vsi.pestat);
	}
	if (iwdev->cleanup_wq)
		destroy_workqueue(iwdev->cleanup_wq);
}

static int irdma_setup_init_state(struct irdma_pci_f *rf)
{
	int status;

	status = irdma_save_msix_info(rf);
	if (status)
		return status;

	rf->hw.device = &rf->pcidev->dev;
	rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
					    &rf->obj_mem.pa, GFP_KERNEL);
	if (!rf->obj_mem.va) {
		status = -ENOMEM;
		goto clean_msixtbl;
	}

	rf->obj_next = rf->obj_mem;
	status = irdma_initialize_dev(rf);
	if (status)
		goto clean_obj_mem;

	return 0;

clean_obj_mem:
	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
			  rf->obj_mem.pa);
	rf->obj_mem.va = NULL;
clean_msixtbl:
	kfree(rf->iw_msixtbl);
	rf->iw_msixtbl = NULL;
	return status;
}

/**
 * irdma_get_used_rsrc - determine resources used internally
 * @iwdev: irdma device
 *
 * Called at the end of open to get all internal allocations
 */
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
						  iwdev->rf->max_pd);
	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
						  iwdev->rf->max_qp);
	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
						  iwdev->rf->max_cq);
	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
						  iwdev->rf->max_mr);
}

void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
{
	enum init_completion_state state = rf->init_state;

	rf->init_state = INVALID_STATE;
	if (rf->rsrc_created) {
		irdma_destroy_aeq(rf);
		irdma_destroy_pble_prm(rf->pble_rsrc);
		irdma_del_ceqs(rf);
		rf->rsrc_created = false;
	}
	switch (state) {
	case CEQ0_CREATED:
		irdma_del_ceq_0(rf);
		fallthrough;
	case CCQ_CREATED:
		irdma_destroy_ccq(rf);
		fallthrough;
	case HW_RSRC_INITIALIZED:
	case HMC_OBJS_CREATED:
		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
				      rf->reset, rf->rdma_ver);
		fallthrough;
	case CQP_CREATED:
		irdma_destroy_cqp(rf);
		fallthrough;
	case INITIAL_STATE:
		irdma_del_init_mem(rf);
		break;
	case INVALID_STATE:
	default:
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
		break;
	}
}

/**
 * irdma_rt_init_hw - Initializes runtime portion of HW
 * @iwdev: irdma device
 * @l2params: qos, tc, mtu info from netdev driver
 *
 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
 * device resource objects.
 */
int irdma_rt_init_hw(struct irdma_device *iwdev,
		     struct irdma_l2params *l2params)
{
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_vsi_init_info vsi_info = {};
	struct irdma_vsi_stats_info stats_info = {};
	int status;

	vsi_info.back_vsi = iwdev;
	vsi_info.params = l2params;
	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
	vsi_info.register_qset = rf->gen_ops.register_qset;
	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
	vsi_info.exception_lan_q = 2;
	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);

	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
	if (status)
		return status;

	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
	if (!stats_info.pestat) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		return -ENOMEM;
	}
	stats_info.fcn_id = dev->hmc_fn_id;
	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
	if (status) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		kfree(stats_info.pestat);
		return status;
	}

	do {
		if (!iwdev->roce_mode) {
			status = irdma_initialize_ilq(iwdev);
			if (status)
				break;
			iwdev->init_state = ILQ_CREATED;
			status = irdma_initialize_ieq(iwdev);
			if (status)
				break;
			iwdev->init_state = IEQ_CREATED;
		}
		if (!rf->rsrc_created) {
			status = irdma_setup_ceqs(rf, &iwdev->vsi);
			if (status)
				break;

			iwdev->init_state = CEQS_CREATED;

			status = irdma_hmc_init_pble(&rf->sc_dev,
						     rf->pble_rsrc);
			if (status) {
				irdma_del_ceqs(rf);
				break;
			}

			iwdev->init_state = PBLE_CHUNK_MEM;

			status = irdma_setup_aeq(rf);
			if (status) {
				irdma_destroy_pble_prm(rf->pble_rsrc);
				irdma_del_ceqs(rf);
				break;
			}
			iwdev->init_state = AEQ_CREATED;
			rf->rsrc_created = true;
		}

		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			irdma_alloc_set_mac(iwdev);
		irdma_add_ip(iwdev);
		iwdev->init_state = IP_ADDR_REGISTERED;

		/* handles async cleanup tasks - disconnect CM, free qp,
		 * free cq bufs
		 */
		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
						    WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
		if (!iwdev->cleanup_wq)
			return -ENOMEM;
		irdma_get_used_rsrc(iwdev);
		init_waitqueue_head(&iwdev->suspend_wq);

		return 0;
	} while (0);

	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
		status, iwdev->init_state);
	irdma_rt_deinit_hw(iwdev);

	return status;
}

/**
 * irdma_ctrl_init_hw - Initializes control portion of HW
 * @rf: RDMA PCI function
 *
 * Create admin queues, HMC objects and RF resource objects
 */
int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	do {
		status = irdma_setup_init_state(rf);
		if (status)
			break;
		rf->init_state = INITIAL_STATE;

		status = irdma_create_cqp(rf);
		if (status)
			break;
		rf->init_state = CQP_CREATED;

		status = irdma_hmc_setup(rf);
		if (status)
			break;
		rf->init_state = HMC_OBJS_CREATED;

		status = irdma_initialize_hw_rsrc(rf);
		if (status)
			break;
		rf->init_state = HW_RSRC_INITIALIZED;

		status = irdma_create_ccq(rf);
		if (status)
			break;
		rf->init_state = CCQ_CREATED;

		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
		if (rf->rdma_ver != IRDMA_GEN_1) {
			status = irdma_get_rdma_features(dev);
			if (status)
				break;
		}

		status = irdma_setup_ceq_0(rf);
		if (status)
			break;
		rf->init_state = CEQ0_CREATED;
		/* Handles processing of CQP completions */
		rf->cqp_cmpl_wq =
			alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
		if (!rf->cqp_cmpl_wq) {
			status = -ENOMEM;
			break;
		}
		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
		irdma_sc_ccq_arm(dev->ccq);
		return 0;
	} while (0);

	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
		rf->init_state, status);
	irdma_ctrl_deinit_hw(rf);
	return status;
}

/**
 * irdma_set_hw_rsrc - set hw memory resources.
 * @rf: RDMA PCI function
 */
static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
	rf->allocated_qps = (void *)(rf->mem_rsrc +
				     (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
	rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
	rf->qp_table = (struct irdma_qp **)
		       (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
	rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);

	spin_lock_init(&rf->rsrc_lock);
	spin_lock_init(&rf->arp_lock);
	spin_lock_init(&rf->qptable_lock);
	spin_lock_init(&rf->cqtable_lock);
	spin_lock_init(&rf->qh_list_lock);
}

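/* The carve-up above must stay in lockstep with
 * irdma_calc_mem_rsrc_size() below: every table and bitmap sized there
 * is sliced out of rf->mem_rsrc here, in the same order.
 */
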
/**
 * irdma_calc_mem_rsrc_size - calculate memory resources size.
 * @rf: RDMA PCI function
 */
static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
{
	u32 rsrc_size;

	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
	rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;

	return rsrc_size;
}

/**
 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
 * @rf: RDMA PCI function
 */
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
{
	u32 rsrc_size;
	u32 mrdrvbits;
	u32 ret;

	if (rf->rdma_ver != IRDMA_GEN_1) {
		rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
						       GFP_KERNEL);
		if (!rf->allocated_ws_nodes)
			return -ENOMEM;

		set_bit(0, rf->allocated_ws_nodes);
		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
	}
	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
	rf->max_mcg = rf->max_qp;

	rsrc_size = irdma_calc_mem_rsrc_size(rf);
	rf->mem_rsrc = vzalloc(rsrc_size);
	if (!rf->mem_rsrc) {
		ret = -ENOMEM;
		goto mem_rsrc_vzalloc_fail;
	}

	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;

	irdma_set_hw_rsrc(rf);

	set_bit(0, rf->allocated_mrs);
	set_bit(0, rf->allocated_qps);
	set_bit(0, rf->allocated_cqs);
	set_bit(0, rf->allocated_pds);
	set_bit(0, rf->allocated_arps);
	set_bit(0, rf->allocated_ahs);
	set_bit(0, rf->allocated_mcgs);
	set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
	set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
	set_bit(1, rf->allocated_cqs);
	set_bit(1, rf->allocated_pds);
	set_bit(2, rf->allocated_cqs);
	set_bit(2, rf->allocated_pds);

	INIT_LIST_HEAD(&rf->mc_qht_list.list);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
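	/* Worked example: with max_mr = 65536, get_count_order() gives 16,
	 * so mrdrvbits = 24 - 16 = 8 and
	 * mr_stagmask = ~(0xFF << 24) = 0x00FFFFFF, i.e. stag indexes are
	 * masked to their low 24 bits.
	 */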

	return 0;

mem_rsrc_vzalloc_fail:
	bitmap_free(rf->allocated_ws_nodes);
	rf->allocated_ws_nodes = NULL;

	return ret;
}

/**
 * irdma_cqp_ce_handler - handle cqp completions
 * @rf: RDMA PCI function
 * @cq: cq for cqp completions
 */
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
{
	struct irdma_cqp_request *cqp_request;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 cqe_count = 0;
	struct irdma_ccq_cqe_info info;
	unsigned long flags;
	int ret;

	do {
		memset(&info, 0, sizeof(info));
		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
		if (ret)
			break;

		cqp_request = (struct irdma_cqp_request *)
			      (unsigned long)info.scratch;
		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
						     info.maj_err_code,
						     info.min_err_code))
			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				  info.op_code, info.maj_err_code, info.min_err_code);
		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;

			if (cqp_request->waiting) {
				WRITE_ONCE(cqp_request->request_done, true);
				wake_up(&cqp_request->waitq);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
		}

		cqe_count++;
	} while (1);

	if (cqe_count) {
		irdma_process_bh(dev);
		irdma_sc_ccq_arm(cq);
	}
}

/**
 * cqp_compl_worker - Handle cqp completions
 * @work: Pointer to work structure
 */
void cqp_compl_worker(struct work_struct *work)
{
	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
					      cqp_cmpl_work);
	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;

	irdma_cqp_ce_handler(rf, cq);
}

/**
 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
 * @cm_core: cm's core
 * @port: port to identify apbvt entry
 */
static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
							  u16 port)
{
	struct irdma_apbvt_entry *entry;

	hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
		if (entry->port == port) {
			entry->use_cnt++;
			return entry;
		}
	}

	return NULL;
}

/**
 * irdma_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: set to delete the connection hash entry
 * @term: termination message flags
 * @termlen: length of term message
 */
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
			 u8 termlen)
{
	struct irdma_modify_qp_info info = {};

	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.dont_send_term = true;
	info.dont_send_fin = true;
	info.termlen = termlen;

	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
		info.dont_send_term = false;
	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
		info.dont_send_fin = false;
	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
		info.reset_tcp_conn = true;
	iwqp->hw_iwarp_state = state;
	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
	iwqp->iwarp_state = info.next_iwarp_state;
}

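/* Example (matches the callers in irdma_process_aeq() above): move a QP
 * to ERROR, removing its hash entry and sending neither TERM nor FIN:
 *
 *	irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
 */
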
/**
 * irdma_del_local_mac_entry - remove a mac entry from the hw table
 * @rf: RDMA PCI function
 * @idx: the index of the mac ip address to delete
 */
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
{
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);
}

/**
 * irdma_add_local_mac_entry - add a mac ip address entry to the hw table
 * @rf: RDMA PCI function
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{
	struct irdma_local_mac_entry_info *info;
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_alloc_local_mac_entry - allocate a mac entry
 * @rf: RDMA PCI function
 * @mac_tbl_idx: the index of the new mac address
 *
 * Allocate a mac address entry and update the mac_tbl_idx
 * to hold the index of the newly created mac address
 * Return 0 if successful, otherwise return error
 */
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
{
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (!status)
		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;

	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}

/**
 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
 * @iwdev: irdma device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
				      u16 accel_local_port, bool add_port)
{
	struct irdma_apbvt_info *info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;
	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = accel_local_port;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
		  (!add_port) ? "DELETE" : "ADD", accel_local_port);

	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

	return status;
}

/**
 * irdma_add_apbvt - add tcp port to HW apbvt table
 * @iwdev: irdma device
 * @port: port for apbvt
 */
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	struct irdma_apbvt_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	entry = irdma_lookup_apbvt_entry(cm_core, port);
	if (entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return entry;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return NULL;
	}

	entry->use_cnt = 1;
	entry->port = port;
	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);

	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
		kfree(entry);
		return NULL;
	}

	return entry;
}

/**
 * irdma_del_apbvt - delete tcp port from HW apbvt table
 * @iwdev: irdma device
 * @entry: apbvt entry object
 */
void irdma_del_apbvt(struct irdma_device *iwdev,
		     struct irdma_apbvt_entry *entry)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	if (--entry->use_cnt) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return;
	}

	hash_del(&entry->hlist);
	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
	 * protect against race where add APBVT CQP can race ahead of the delete
	 * APBVT for same port.
	 */
	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
	kfree(entry);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}
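
/* Usage sketch (illustrative): APBVT entries are refcounted per TCP port,
 * so callers pair irdma_add_apbvt() with irdma_del_apbvt() around the life
 * of a listener or connection. loc_port here is an assumed local variable:
 *
 *	struct irdma_apbvt_entry *apbvt_entry;
 *
 *	apbvt_entry = irdma_add_apbvt(iwdev, loc_port);
 *	if (!apbvt_entry)
 *		return -ENOMEM;
 *	...
 *	irdma_del_apbvt(iwdev, apbvt_entry);
 */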
/**
 * irdma_manage_arp_cache - manage hw arp cache
 * @rf: RDMA PCI function
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4
 * @action: add, delete or modify
 */
void irdma_manage_arp_cache(struct irdma_pci_f *rf,
			    const unsigned char *mac_addr,
			    u32 *ip_addr, bool ipv4, u32 action)
{
	struct irdma_add_arp_cache_entry_info *info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int arp_index;

	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
	if (arp_index == -1)
		return;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == IRDMA_ARP_ADD) {
		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = (u16)arp_index;
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->post_sq = 1;
	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
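
/* Usage sketch (illustrative): a netevent handler might mirror a neighbour
 * update into the HW ARP cache. neigh and local_ipaddr are assumed to come
 * from that handler:
 *
 *	if (neigh->nud_state & NUD_VALID)
 *		irdma_manage_arp_cache(rf, neigh->ha, local_ipaddr, true,
 *				       IRDMA_ARP_ADD);
 *	else
 *		irdma_manage_arp_cache(rf, neigh->ha, local_ipaddr, true,
 *				       IRDMA_ARP_DELETE);
 */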
/**
 * irdma_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 */
static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_cm_node *cm_node = cqp_request->param;

	irdma_send_syn(cm_node, 1);
	irdma_rem_ref_cm_node(cm_node);
}
/**
 * irdma_manage_qhash - add or modify qhash
 * @iwdev: irdma device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
		       enum irdma_quad_entry_type etype,
		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
		       bool wait)
{
	struct irdma_qhash_table_info *info;
	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_cm_node *cm_node = cmnode;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));
	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id < VLAN_N_VID) {
		info->vlan_valid = true;
		info->vlan_id = cminfo->vlan_id;
	} else {
		info->vlan_valid = false;
	}
	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cminfo->qh_qpid;
	info->dest_port = cminfo->loc_port;
	info->dest_ip[0] = cminfo->loc_addr[0];
	info->dest_ip[1] = cminfo->loc_addr[1];
	info->dest_ip[2] = cminfo->loc_addr[2];
	info->dest_ip[3] = cminfo->loc_addr[3];
	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
		info->src_port = cminfo->rem_port;
		info->src_ip[0] = cminfo->rem_addr[0];
		info->src_ip[1] = cminfo->rem_addr[1];
		info->src_ip[2] = cminfo->rem_addr[2];
		info->src_ip[3] = cminfo->rem_addr[3];
	}
	if (cmnode) {
		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
		cqp_request->param = cmnode;
		if (!wait)
			refcount_inc(&cm_node->refcnt);
	}
	if (info->ipv4_valid)
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);
	else
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id, cmnode);

	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	if (status && cm_node && !wait)
		irdma_rem_ref_cm_node(cm_node);

	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}
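
/* Usage sketch (illustrative): the CM layer adds a SYN qhash entry when a
 * listener starts, waiting for the CQP completion; cm_info is assumed to be
 * filled in by the caller:
 *
 *	ret = irdma_manage_qhash(iwdev, cm_info, IRDMA_QHASH_TYPE_TCP_SYN,
 *				 IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true);
 */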
/**
 * irdma_hw_flush_wqes_callback - Check return code after flush
 * @cqp_request: flush QP cqp completion
 */
static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_qp_flush_info *hw_info;
	struct irdma_sc_qp *qp;
	struct irdma_qp *iwqp;
	struct cqp_cmds_info *cqp_info;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
	qp = cqp_info->in.u.qp_flush_wqes.qp;
	iwqp = qp->qp_uk.back_qp;

	if (cqp_request->compl_info.maj_err_code)
		return;

	if (hw_info->rq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		/* RQ WQE flush was requested but did not happen */
		qp->qp_uk.rq_flush_complete = true;
	}
	if (hw_info->sq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
				  qp->qp_uk.qp_id);
			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
		}
		qp->qp_uk.sq_flush_complete = true;
	}
}
/**
 * irdma_hw_flush_wqes - flush qp's wqe
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
			struct irdma_qp_flush_info *info, bool wait)
{
	int status;
	struct irdma_qp_flush_info *hw_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_qp *iwqp = qp->qp_uk.back_qp;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	if (!wait)
		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (status) {
		qp->qp_uk.sq_flush_complete = true;
		qp->qp_uk.rq_flush_complete = true;
		irdma_put_cqp_request(&rf->cqp, cqp_request);
		return status;
	}

	if (!wait || cqp_request->compl_info.maj_err_code)
		goto put_cqp;

	if (info->rq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/* RQ WQE flush was requested but did not happen */
			qp->qp_uk.rq_flush_complete = true;
		}
	}
	if (info->sq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/* Handling case where WQE is posted to empty SQ when
			 * flush has not completed
			 */
			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
				struct irdma_cqp_request *new_req;

				if (!qp->qp_uk.sq_flush_complete)
					goto put_cqp;
				qp->qp_uk.sq_flush_complete = false;
				qp->flush_sq = false;

				info->rq = false;
				info->sq = true;
				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
				if (!new_req) {
					status = -ENOMEM;
					goto put_cqp;
				}
				cqp_info = &new_req->info;
				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
				memcpy(hw_info, info, sizeof(*hw_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_flush_wqes.qp = qp;
				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;

				status = irdma_handle_cqp_op(rf, new_req);
				if (new_req->compl_info.maj_err_code ||
				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
				    status) {
					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
						  iwqp->ibqp.qp_num);
					qp->qp_uk.sq_flush_complete = false;
					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
				}
				irdma_put_cqp_request(&rf->cqp, new_req);
			} else {
				/* SQ WQE flush was requested but did not happen */
				qp->qp_uk.sq_flush_complete = true;
			}
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
				qp->qp_uk.sq_flush_complete = true;
		}
	}

	ibdev_dbg(&rf->iwdev->ibdev,
		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
		  cqp_request->compl_info.maj_err_code,
		  cqp_request->compl_info.min_err_code);
put_cqp:
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}
/**
 * irdma_gen_ae - generate AE
 * @rf: RDMA PCI function
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
		  struct irdma_gen_ae_info *info, bool wait)
{
	struct irdma_gen_ae_info *ae_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	ae_info = &cqp_request->info.in.u.gen_ae.info;
	memcpy(ae_info, info, sizeof(*ae_info));
	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
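
/* Usage sketch (illustrative): a caller can force an asynchronous event on
 * a stalled QP; the AE code and source chosen here are assumptions for the
 * example only:
 *
 *	struct irdma_gen_ae_info ae_info = {
 *		.ae_code = IRDMA_AE_BAD_CLOSE,
 *		.ae_src = IRDMA_AE_SOURCE_RQ,
 *	};
 *
 *	irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
 */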
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
{
	struct irdma_qp_flush_info info = {};
	struct irdma_pci_f *rf = iwqp->iwdev->rf;
	u8 flush_code = iwqp->sc_qp.flush_code;

	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
		return;

	/* Set flush info fields */
	info.sq = flush_mask & IRDMA_FLUSH_SQ;
	info.rq = flush_mask & IRDMA_FLUSH_RQ;

	/* Generate userflush errors in CQE */
	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.sq_minor_code = FLUSH_GENERAL_ERR;
	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.rq_minor_code = FLUSH_GENERAL_ERR;
	info.userflushcode = true;

	if (flush_mask & IRDMA_REFLUSH) {
		if (info.sq)
			iwqp->sc_qp.flush_sq = false;
		if (info.rq)
			iwqp->sc_qp.flush_rq = false;
	} else {
		if (info.sq && iwqp->sc_qp.sq_flush_code)
			info.sq_minor_code = flush_code;
		if (info.rq && iwqp->sc_qp.rq_flush_code)
			info.rq_minor_code = flush_code;
		if (!iwqp->user_mode)
			queue_delayed_work(iwqp->iwdev->cleanup_wq,
					   &iwqp->dwork_flush,
					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}

	/* Issue flush */
	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
				  flush_mask & IRDMA_FLUSH_WAIT);
	iwqp->flush_issued = true;
}
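
/* Usage sketch (illustrative): after transitioning a QP to error, a caller
 * typically flushes both queues and waits for the flush to complete:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			 IRDMA_FLUSH_WAIT);
 */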