/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

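/* Wait for a previously posted ramrod to complete. The completion callback
 * above sets comp_done->done; this helper polls that flag, sleeping 5-10ms
 * between checks for up to SPQ_BLOCK_SLEEP_LENGTH iterations. If the ramrod
 * is still outstanding, an MCP drain is requested and the poll is retried
 * once before giving up with -EBUSY.
 */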
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->elem.hdr.echo = 0;
	p_hwfn->p_spq->echo_idx++;
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	p_cxt->xstorm_st_context.consolid_base_addr.lo =
		DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.consolid_base_addr.hi =
		DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

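/* Copy a prepared SPQ entry into the next slot of the SPQ chain and ring
 * the XCM doorbell with the updated producer index so the firmware picks
 * the ramrod up.
 */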
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct slow_path_element *elem;
	struct core_db_data db;

	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* validate producer is up to date */
	rmb();

	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* do not reorder */
	barrier();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rung */
	mmiowb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	DP_NOTICE(p_hwfn,
		  "Unknown Async completion for protocol: %d\n",
		  p_eqe->protocol_id);
	return -EINVAL;
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

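/* Event-queue handler registered on the slowpath status block: walks the EQ
 * chain from the driver's consumer up to a snapshot of the firmware
 * consumer, dispatching each event either to the async handler or to
 * qed_spq_completion(), then updates the EQ producer for the firmware.
 */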
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
	struct qed_hwfn *p_hwfn,
	struct eth_slow_path_rx_cqe *cqe,
	enum protocol_type protocol)
{
	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	unsigned int i = 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;
	p_spq->echo_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

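/* Allocate the SPQ: the slow-path element chain itself plus one DMA-coherent
 * array of struct qed_spq_entry whose ramrod data the chain elements point
 * at (the pointers are wired up later in qed_spq_setup()).
 */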
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	struct qed_spq_entry *p_virt = NULL;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		struct qed_spq_entry *p_en2;

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;
			return 0;
		}

		p_en2 = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_en2->list);

		/* Struct assignment */
		*p_en2 = *p_ent;

		kfree(p_ent);

		p_ent = p_en2;
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

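/* Post a single ramrod: fill the entry, queue it on the pending list under
 * the SPQ lock and try to flush the pending list to hardware. For EBLOCK
 * entries the caller's context then waits (qed_spq_block) for the firmware
 * completion and returns the entry to the free pool itself.
 */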
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

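/* Called from EQ processing when a ramrod completion event arrives: find
 * the completion_pending entry whose echo matches the EQE, run its
 * completion callback outside the lock, and then try to post any ramrods
 * that were waiting for room in the chain.
 */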
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);

			qed_chain_return_produced(&p_spq->chain);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
		   found->comp_cb.function, found->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for freeing its own entry */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}